comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
added check and flag to avoid ending span multiple times just in case | public BinaryData getBodyAsBinaryData() {
try {
return response.getBodyAsBinaryData();
} catch (Exception e) {
tracer.end(null, e, span);
throw e;
} finally {
endNoError();
}
} | endNoError(); | public BinaryData getBodyAsBinaryData() {
try {
return response.getBodyAsBinaryData();
} catch (Exception e) {
onError(null, e);
throw e;
} finally {
endNoError();
}
} | class TraceableResponse extends HttpResponse {
private final HttpResponse response;
private final Context span;
private final Tracer tracer;
private TraceableResponse(HttpResponse response, Tracer tracer, Context span) {
super(response.getRequest());
this.response = response;
this.span = span;
this.tracer = tracer;
}
public static HttpResponse create(HttpResponse response, Tracer tracer, Context span) {
if (tracer.isRecording(span)) {
return new TraceableResponse(response, tracer, span);
}
tracer.end(null, null, span);
return response;
}
@Override
public int getStatusCode() {
return response.getStatusCode();
}
@Deprecated
@Override
public String getHeaderValue(String name) {
return response.getHeaderValue(name);
}
@Override
public String getHeaderValue(HttpHeaderName headerName) {
return response.getHeaderValue(headerName);
}
@Override
public HttpHeaders getHeaders() {
return response.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return Flux.using(() -> span,
s -> response.getBody()
.doOnError(e -> tracer.end(null, e, s))
.doOnCancel(() -> tracer.end(CANCELLED_ERROR_TYPE, null, s)),
s -> endNoError());
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return endSpanWhen(response.getBodyAsByteArray());
}
@Override
public Mono<String> getBodyAsString() {
return endSpanWhen(response.getBodyAsString());
}
@Override
@Override
public Mono<String> getBodyAsString(Charset charset) {
return endSpanWhen(response.getBodyAsString(charset));
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return endSpanWhen(response.getBodyAsInputStream());
}
@Override
public void close() {
response.close();
endNoError();
}
private <T> Mono<T> endSpanWhen(Mono<T> publisher) {
return Mono.using(() -> span, s -> publisher.doOnError(e -> tracer.end(null, e, s))
.doOnCancel(() -> tracer.end(CANCELLED_ERROR_TYPE, null, s)), s -> endNoError());
}
private void endNoError() {
String errorType = null;
if (response == null) {
errorType = OTHER_ERROR_TYPE;
} else if (response.getStatusCode() >= 400) {
errorType = String.valueOf(response.getStatusCode());
}
tracer.end(errorType, null, span);
}
} | class TraceableResponse extends HttpResponse {
private final HttpResponse response;
private final Context span;
private final Tracer tracer;
private volatile int ended = 0;
private static final AtomicIntegerFieldUpdater<TraceableResponse> ENDED_UPDATER
= AtomicIntegerFieldUpdater.newUpdater(TraceableResponse.class, "ended");
private TraceableResponse(HttpResponse response, Tracer tracer, Context span) {
super(response.getRequest());
this.response = response;
this.span = span;
this.tracer = tracer;
}
public static HttpResponse create(HttpResponse response, Tracer tracer, Context span) {
if (tracer.isRecording(span)) {
return new TraceableResponse(response, tracer, span);
}
tracer.end(null, null, span);
return response;
}
@Override
public int getStatusCode() {
return response.getStatusCode();
}
@Deprecated
@Override
public String getHeaderValue(String name) {
return response.getHeaderValue(name);
}
@Override
public String getHeaderValue(HttpHeaderName headerName) {
return response.getHeaderValue(headerName);
}
@Override
public HttpHeaders getHeaders() {
return response.getHeaders();
}
@Override
public Flux<ByteBuffer> getBody() {
return Flux.using(() -> span,
s -> response.getBody()
.doOnError(e -> onError(null, e))
.doOnCancel(() -> onError(CANCELLED_ERROR_TYPE, null)),
s -> endNoError());
}
@Override
public Mono<byte[]> getBodyAsByteArray() {
return endSpanWhen(response.getBodyAsByteArray());
}
@Override
public Mono<String> getBodyAsString() {
return endSpanWhen(response.getBodyAsString());
}
@Override
@Override
public Mono<String> getBodyAsString(Charset charset) {
return endSpanWhen(response.getBodyAsString(charset));
}
@Override
public Mono<InputStream> getBodyAsInputStream() {
return endSpanWhen(response.getBodyAsInputStream());
}
@Override
public void close() {
response.close();
endNoError();
}
private <T> Mono<T> endSpanWhen(Mono<T> publisher) {
return Mono.using(() -> span,
s -> publisher.doOnError(e -> onError(null, e)).doOnCancel(() -> onError(CANCELLED_ERROR_TYPE, null)),
s -> endNoError());
}
private void onError(String errorType, Throwable error) {
if (ENDED_UPDATER.compareAndSet(this, 0, 1)) {
tracer.end(errorType, error, span);
}
}
private void endNoError() {
if (ENDED_UPDATER.compareAndSet(this, 0, 1)) {
String errorType = null;
if (response == null) {
errorType = OTHER_ERROR_TYPE;
} else if (response.getStatusCode() >= 400) {
errorType = String.valueOf(response.getStatusCode());
}
tracer.end(errorType, null, span);
}
}
} |
Liudmila and I’ve been investigating a strange random CI error with the following stack trace – ``` [ForkJoinPool-1-worker-1] WARN com.azure.messaging.servicebus.WindowedSubscriber$WindowWork - {"az.sdk.message":"Terminating the work. Error while scheduling or waiting for timeout.","exception":"Scheduler unavailable","demand":1,"workId":0,"pending":1} reactor.core.Exceptions$ReactorRejectedExecutionException: Scheduler unavailable at reactor.core.Exceptions.failWithRejected(Exceptions.java:271) at reactor.core.publisher.Operators.onRejectedExecution(Operators.java:1024) at reactor.core.publisher.MonoDelay.subscribe(MonoDelay.java:64) at reactor.core.publisher.Mono.subscribe(Mono.java:4491) at reactor.core.publisher.Mono.subscribeWith(Mono.java:4606) at reactor.core.publisher.Mono.subscribe(Mono.java:4458) at reactor.core.publisher.Mono.subscribe(Mono.java:4394) at reactor.core.publisher.Mono.subscribe(Mono.java:4366) at com.azure.messaging.servicebus.WindowedSubscriber$WindowWork.beginTimeoutTimer(WindowedSubscriber.java:859) at com.azure.messaging.servicebus.WindowedSubscriber$WindowWork.init(WindowedSubscriber.java:700) at com.azure.messaging.servicebus.WindowedSubscriber$WindowWork.access$1100(WindowedSubscriber.java:576) at com.azure.messaging.servicebus.WindowedSubscriber.initWorkOnce(WindowedSubscriber.java:386) at com.azure.messaging.servicebus.WindowedSubscriber.drainLoop(WindowedSubscriber.java:264) at com.azure.messaging.servicebus.WindowedSubscriber.drain(WindowedSubscriber.java:223) at com.azure.messaging.servicebus.WindowedSubscriber.enqueueRequestImpl(WindowedSubscriber.java:144) at com.azure.messaging.servicebus.WindowedSubscriberFluxWindowIsolatedTest.lambda$shouldPickEnqueuedWindowRequestsOnSubscriptionReady$19(WindowedSubscriberFluxWindowIsolatedTest.java:326) at reactor.test.DefaultStepVerifierBuilder$DefaultStepVerifier.toVerifierAndSubscribe(DefaultStepVerifierBuilder.java:862) at 
reactor.test.DefaultStepVerifierBuilder$DefaultStepVerifier.verify(DefaultStepVerifierBuilder.java:831) at reactor.test.DefaultStepVerifierBuilder$DefaultStepVerifier.verify(DefaultStepVerifierBuilder.java:823) at reactor.test.DefaultStepVerifierBuilder.verifyComplete(DefaultStepVerifierBuilder.java:690) at com.azure.messaging.servicebus.WindowedSubscriberFluxWindowIsolatedTest.shouldPickEnqueuedWindowRequestsOnSubscriptionReady(WindowedSubscriberFluxWindowIsolatedTest.java:344) ``` All the tests in this class are running under `VirtualTimeScheduler` (VTS) and once in a while the test "shouldPickEnqueuedWindowRequestsOnSubscriptionReady“ fails with "Scheduler unavailable". From the stack trace this error comes from the scheduler (should be VTS in case of test) when WindowedSubscriber tries to register the timeout as part of starting the first WindowWork. While in the real-world scenario the scheduler for timeout will be the global `ParallelScheduler` not VTS, both schedulers can throw this error only when it’s shutdown. In the test we shut down the local VTS (scoped to the test) only when the test is completed, not in the middle of the test run. It’s known that VTS infra tracks the current VTS instance in a static member, so we had seen similar "scheduler unavailable" when two test using VTS infra runs in parallel and completion of one disposes the static current VTS. But as of today, we run all VTS tests in Isolated mode, so this should not happen. Given this is so random failure and also observed happening only when CI is loaded, adding this logging function to shed some light into what’s going on here. The tests log using this function in case the VTS gets closed unexpectedly, this helps us, 1. To see if the problem is local to the test, I.e., test run forward and closes the VTS. 2. To see if the problem triggered from outside of the test, i.e., the VTS infra itself tear down the static current VTS holder. 3. 
To see if there is for some reason VTS is not even gets injected. | void logIfClosedUnexpectedly(ClientLogger logger) {
final boolean wasAutoClosed = get();
final boolean isSchedulerDisposed = scheduler.isDisposed();
if (wasAutoClosed || isSchedulerDisposed) {
if (!wasAutoClosed) {
logger.atError().log("VirtualTimeScheduler unavailable (unexpected close from outside of the test).");
} else {
logger.atError().log("VirtualTimeScheduler unavailable (unexpected close by the test).");
}
}
} | } | void logIfClosedUnexpectedly(ClientLogger logger) {
final boolean wasAutoClosed = get();
final boolean isSchedulerDisposed = scheduler.isDisposed();
if (wasAutoClosed || isSchedulerDisposed) {
if (!wasAutoClosed) {
logger.atError().log("VirtualTimeScheduler unavailable (unexpected close from outside of the test).");
} else {
logger.atError().log("VirtualTimeScheduler unavailable (unexpected close by the test).");
}
}
} | class VirtualTimeStepVerifier extends AtomicBoolean implements AutoCloseable {
private final VirtualTimeScheduler scheduler;
VirtualTimeStepVerifier() {
super(false);
scheduler = VirtualTimeScheduler.create();
}
<T> StepVerifier.Step<T> create(Supplier<Publisher<T>> scenarioSupplier) {
return StepVerifier.withVirtualTime(scenarioSupplier, () -> scheduler, Integer.MAX_VALUE);
}
@Override
public void close() {
super.set(true);
scheduler.dispose();
}
} | class VirtualTimeStepVerifier extends AtomicBoolean implements AutoCloseable {
private final VirtualTimeScheduler scheduler;
VirtualTimeStepVerifier() {
super(false);
scheduler = VirtualTimeScheduler.create();
}
<T> StepVerifier.Step<T> create(Supplier<Publisher<T>> scenarioSupplier) {
return StepVerifier.withVirtualTime(scenarioSupplier, () -> scheduler, Integer.MAX_VALUE);
}
@Override
public void close() {
super.set(true);
scheduler.dispose();
}
} |
com.azure.core.management.exception.ManagementException: Status code 400, "{"error":{"code":"CloudServiceConfigurationRetired","message":"CloudServiceConfiguration pools are retired as of 2/29/2024, and new pools of this type cannot be added\nRequestId:80525c33-84a1-4685-b3c8-ab2a03566f10\nTime:2024-03-27T07:04:01.2339901Z","target":"BatchAccount"}}" `CloudServiceConfiguration` is not supported anymore. | public void testCRUDBatchPool() {
BatchAccount account = null;
Pool pool = null;
try {
String batchAccountName = "sa" + randomPadding();
String poolName = "bp" + randomPadding();
String poolDisplayName = "bpdn" + randomPadding();
account = batchManager
.batchAccounts()
.define(batchAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
pool = batchManager.pools()
.define(poolName)
.withExistingBatchAccount(resourceGroup, batchAccountName)
.withDisplayName(poolDisplayName)
.withDeploymentConfiguration(
new DeploymentConfiguration()
.withVirtualMachineConfiguration(
new VirtualMachineConfiguration()
.withImageReference(new ImageReference().withPublisher("Canonical")
.withOffer("UbuntuServer").withSku("18.04-LTS").withVersion("latest"))
.withNodeAgentSkuId("batch.node.ubuntu 18.04")))
.withScaleSettings(
new ScaleSettings()
.withFixedScale(
new FixedScaleSettings()
.withResizeTimeout(Duration.parse("PT8M"))
.withTargetDedicatedNodes(1)
.withTargetLowPriorityNodes(1)
.withNodeDeallocationOption(ComputeNodeDeallocationOption.TASK_COMPLETION)))
.withVmSize("Standard_D1")
.create();
Assertions.assertEquals(poolName, pool.name());
Assertions.assertEquals(poolDisplayName, pool.displayName());
Assertions.assertNull(pool.scaleSettings().autoScale());
Assertions.assertEquals(pool.scaleSettings().fixedScale().nodeDeallocationOption(), ComputeNodeDeallocationOption.TASK_COMPLETION);
} finally {
if (pool != null) {
batchManager.pools().deleteById(pool.id());
}
if (account != null) {
batchManager.batchAccounts().deleteById(account.id());
}
}
} | .withNodeAgentSkuId("batch.node.ubuntu 18.04"))) | public void testCRUDBatchPool() {
BatchAccount account = null;
Pool pool = null;
try {
String batchAccountName = "sa" + randomPadding();
String poolName = "bp" + randomPadding();
String poolDisplayName = "bpdn" + randomPadding();
account = batchManager
.batchAccounts()
.define(batchAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
pool = batchManager.pools()
.define(poolName)
.withExistingBatchAccount(resourceGroup, batchAccountName)
.withDisplayName(poolDisplayName)
.withDeploymentConfiguration(
new DeploymentConfiguration()
.withVirtualMachineConfiguration(
new VirtualMachineConfiguration()
.withImageReference(new ImageReference().withPublisher("Canonical")
.withOffer("UbuntuServer").withSku("18.04-LTS").withVersion("latest"))
.withNodeAgentSkuId("batch.node.ubuntu 18.04")))
.withScaleSettings(
new ScaleSettings()
.withFixedScale(
new FixedScaleSettings()
.withResizeTimeout(Duration.parse("PT8M"))
.withTargetDedicatedNodes(1)
.withTargetLowPriorityNodes(1)
.withNodeDeallocationOption(ComputeNodeDeallocationOption.TASK_COMPLETION)))
.withVmSize("Standard_D1")
.create();
Assertions.assertEquals(poolName, pool.name());
Assertions.assertEquals(poolDisplayName, pool.displayName());
Assertions.assertNull(pool.scaleSettings().autoScale());
Assertions.assertEquals(pool.scaleSettings().fixedScale().nodeDeallocationOption(), ComputeNodeDeallocationOption.TASK_COMPLETION);
} finally {
if (pool != null) {
batchManager.pools().deleteById(pool.id());
}
if (account != null) {
batchManager.batchAccounts().deleteById(account.id());
}
}
} | class BatchTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_WEST2;
private String resourceGroup = "rg" + randomPadding();
private BatchManager batchManager;
private StorageManager storageManager;
private boolean testEnv;
@Override
public void beforeTest() {
batchManager = BatchManager
.configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));
storageManager = StorageManager
.configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroup = testResourceGroup;
} else {
storageManager.resourceManager().resourceGroups().define(resourceGroup)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
storageManager.resourceManager().resourceGroups().beginDeleteByName(resourceGroup);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
public void testCreateBatchAccount() {
StorageAccount storageAccount = null;
BatchAccount account = null;
try {
final String storageAccountName = "sa" + randomPadding();
storageAccount = storageManager.storageAccounts().define(storageAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
final String batchAccountName = "ba" + randomPadding();
account = batchManager
.batchAccounts()
.define(batchAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.withAutoStorage(
new AutoStorageBaseProperties()
.withStorageAccountId(storageAccount.id()))
.create();
assertNotNull(account);
BatchAccount batchAccount = batchManager.batchAccounts().getByResourceGroup(resourceGroup, batchAccountName);
assertEquals(batchAccountName, batchAccount.name());
assertEquals(REGION.toString(), batchAccount.location());
} finally {
if (account != null) {
batchManager.batchAccounts().deleteById(account.id());
}
if (storageAccount != null) {
storageManager.storageAccounts().deleteById(storageAccount.id());
}
}
}
@Test
@DoNotRecord(skipInPlayback = true)
public void testCRUDBatchAccount() {
BatchAccount account = null;
StorageAccount storageAccount = null;
final String batchAccountName;
try {
batchAccountName = "sa" + randomPadding();
account = batchManager
.batchAccounts()
.define(batchAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
Assertions.assertNull(account.autoStorage());
BatchAccountKeys keys = account.getKeys();
Assertions.assertNotNull(keys.primary());
Assertions.assertNotNull(keys.secondary());
BatchAccountKeys regeneratedKeys = account.regenerateKey(new BatchAccountRegenerateKeyParameters().withKeyName(AccountKeyType.PRIMARY));
Assertions.assertNotNull(regeneratedKeys.primary());
Assertions.assertNotNull(regeneratedKeys.secondary());
final String storageAccountName = "sa" + randomPadding();
storageAccount = storageManager
.storageAccounts()
.define(storageAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
account
.update()
.withAutoStorage(new AutoStorageBaseProperties().withStorageAccountId(storageAccount.id()))
.apply();
Assertions.assertNotNull(account.autoStorage().storageAccountId());
OffsetDateTime lastKeySync = account.autoStorage().lastKeySync();
Assertions.assertNotNull(lastKeySync);
account.synchronizeAutoStorageKeys();
account.refresh();
Assertions.assertNotEquals(lastKeySync, account.autoStorage().lastKeySync());
} finally {
if (account != null) {
batchManager.batchAccounts().deleteById(account.id());
}
if (storageAccount != null) {
storageManager.storageAccounts().deleteById(storageAccount.id());
}
}
}
@Test
@DoNotRecord(skipInPlayback = true)
public void testCRUDBatchApplication() {
StorageAccount storageAccount = null;
BatchAccount account = null;
Application application = null;
ApplicationPackage applicationPackage = null;
final String batchAccountName;
final String applicationName;
String packageVersion;
try {
final String storageAccountName = "sa" + randomPadding();
storageAccount = storageManager
.storageAccounts().
define(storageAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
batchAccountName = "sa" + randomPadding();
account = batchManager
.batchAccounts()
.define(batchAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.withAutoStorage(new AutoStorageBaseProperties().withStorageAccountId(storageAccount.id()))
.create();
applicationName = "ba" + randomPadding();
String displayName = "badn" + randomPadding();
application = batchManager
.applications()
.define(applicationName)
.withExistingBatchAccount(resourceGroup, batchAccountName)
.withDisplayName(displayName)
.withAllowUpdates(true)
.create();
Assertions.assertEquals(application.displayName(), displayName);
Assertions.assertEquals(application.name(), applicationName);
Assertions.assertNull(application.defaultVersion());
String newDisplayName = "newbadn" + randomPadding();
application
.update()
.withDisplayName(newDisplayName)
.apply();
Assertions.assertNotEquals(displayName, application.displayName());
packageVersion = "version" + randomPadding();
applicationPackage = batchManager
.applicationPackages()
.define(packageVersion)
.withExistingApplication(resourceGroup, batchAccountName, applicationName)
.create();
Assertions.assertNotNull(applicationPackage);
Assertions.assertNull(applicationPackage.lastActivationTime());
} finally {
if (applicationPackage != null) {
batchManager.applicationPackages().deleteById(applicationPackage.id());
}
if (application != null) {
batchManager.applications().deleteById(application.id());
}
if (account != null) {
batchManager.batchAccounts().deleteById(account.id());
}
if (storageAccount != null) {
storageManager.storageAccounts().deleteById(storageAccount.id());
}
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} | class BatchTests extends TestBase {
private static final Random RANDOM = new Random();
private static final Region REGION = Region.US_WEST2;
private String resourceGroup = "rg" + randomPadding();
private BatchManager batchManager;
private StorageManager storageManager;
private boolean testEnv;
@Override
public void beforeTest() {
batchManager = BatchManager
.configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));
storageManager = StorageManager
.configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
.authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));
String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
if (testEnv) {
resourceGroup = testResourceGroup;
} else {
storageManager.resourceManager().resourceGroups().define(resourceGroup)
.withRegion(REGION)
.create();
}
}
@Override
protected void afterTest() {
if (!testEnv) {
storageManager.resourceManager().resourceGroups().beginDeleteByName(resourceGroup);
}
}
@Test
@DoNotRecord(skipInPlayback = true)
public void testCreateBatchAccount() {
StorageAccount storageAccount = null;
BatchAccount account = null;
try {
final String storageAccountName = "sa" + randomPadding();
storageAccount = storageManager.storageAccounts().define(storageAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
final String batchAccountName = "ba" + randomPadding();
account = batchManager
.batchAccounts()
.define(batchAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.withAutoStorage(
new AutoStorageBaseProperties()
.withStorageAccountId(storageAccount.id()))
.create();
assertNotNull(account);
BatchAccount batchAccount = batchManager.batchAccounts().getByResourceGroup(resourceGroup, batchAccountName);
assertEquals(batchAccountName, batchAccount.name());
assertEquals(REGION.toString(), batchAccount.location());
} finally {
if (account != null) {
batchManager.batchAccounts().deleteById(account.id());
}
if (storageAccount != null) {
storageManager.storageAccounts().deleteById(storageAccount.id());
}
}
}
@Test
@DoNotRecord(skipInPlayback = true)
public void testCRUDBatchAccount() {
BatchAccount account = null;
StorageAccount storageAccount = null;
final String batchAccountName;
try {
batchAccountName = "sa" + randomPadding();
account = batchManager
.batchAccounts()
.define(batchAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
Assertions.assertNull(account.autoStorage());
BatchAccountKeys keys = account.getKeys();
Assertions.assertNotNull(keys.primary());
Assertions.assertNotNull(keys.secondary());
BatchAccountKeys regeneratedKeys = account.regenerateKey(new BatchAccountRegenerateKeyParameters().withKeyName(AccountKeyType.PRIMARY));
Assertions.assertNotNull(regeneratedKeys.primary());
Assertions.assertNotNull(regeneratedKeys.secondary());
final String storageAccountName = "sa" + randomPadding();
storageAccount = storageManager
.storageAccounts()
.define(storageAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
account
.update()
.withAutoStorage(new AutoStorageBaseProperties().withStorageAccountId(storageAccount.id()))
.apply();
Assertions.assertNotNull(account.autoStorage().storageAccountId());
OffsetDateTime lastKeySync = account.autoStorage().lastKeySync();
Assertions.assertNotNull(lastKeySync);
account.synchronizeAutoStorageKeys();
account.refresh();
Assertions.assertNotEquals(lastKeySync, account.autoStorage().lastKeySync());
} finally {
if (account != null) {
batchManager.batchAccounts().deleteById(account.id());
}
if (storageAccount != null) {
storageManager.storageAccounts().deleteById(storageAccount.id());
}
}
}
@Test
@DoNotRecord(skipInPlayback = true)
public void testCRUDBatchApplication() {
StorageAccount storageAccount = null;
BatchAccount account = null;
Application application = null;
ApplicationPackage applicationPackage = null;
final String batchAccountName;
final String applicationName;
String packageVersion;
try {
final String storageAccountName = "sa" + randomPadding();
storageAccount = storageManager
.storageAccounts().
define(storageAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.create();
batchAccountName = "sa" + randomPadding();
account = batchManager
.batchAccounts()
.define(batchAccountName)
.withRegion(REGION)
.withExistingResourceGroup(resourceGroup)
.withAutoStorage(new AutoStorageBaseProperties().withStorageAccountId(storageAccount.id()))
.create();
applicationName = "ba" + randomPadding();
String displayName = "badn" + randomPadding();
application = batchManager
.applications()
.define(applicationName)
.withExistingBatchAccount(resourceGroup, batchAccountName)
.withDisplayName(displayName)
.withAllowUpdates(true)
.create();
Assertions.assertEquals(application.displayName(), displayName);
Assertions.assertEquals(application.name(), applicationName);
Assertions.assertNull(application.defaultVersion());
String newDisplayName = "newbadn" + randomPadding();
application
.update()
.withDisplayName(newDisplayName)
.apply();
Assertions.assertNotEquals(displayName, application.displayName());
packageVersion = "version" + randomPadding();
applicationPackage = batchManager
.applicationPackages()
.define(packageVersion)
.withExistingApplication(resourceGroup, batchAccountName, applicationName)
.create();
Assertions.assertNotNull(applicationPackage);
Assertions.assertNull(applicationPackage.lastActivationTime());
} finally {
if (applicationPackage != null) {
batchManager.applicationPackages().deleteById(applicationPackage.id());
}
if (application != null) {
batchManager.applications().deleteById(application.id());
}
if (account != null) {
batchManager.batchAccounts().deleteById(account.id());
}
if (storageAccount != null) {
storageManager.storageAccounts().deleteById(storageAccount.id());
}
}
}
@Test
@DoNotRecord(skipInPlayback = true)
private static String randomPadding() {
return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000));
}
} |
Our start log is info level, do we need this info as well? | public void stop() {
LOGGER.debug("Kafka Cosmos sink connector {} is stopped.");
} | LOGGER.debug("Kafka Cosmos sink connector {} is stopped."); | public void stop() {
LOGGER.info("Kafka Cosmos sink connector {} is stopped.");
} | class CosmosSinkConnector extends SinkConnector {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSinkConnector.class);
private CosmosSinkConfig sinkConfig;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos sink connector");
this.sinkConfig = new CosmosSinkConfig(props);
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSinkTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
LOGGER.info("Setting task configurations with maxTasks {}", maxTasks);
List<Map<String, String>> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
configs.add(this.sinkConfig.originalsStrings());
}
return configs;
}
@Override
@Override
public ConfigDef config() {
return CosmosSinkConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
} | class CosmosSinkConnector extends SinkConnector {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSinkConnector.class);
private CosmosSinkConfig sinkConfig;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos sink connector");
this.sinkConfig = new CosmosSinkConfig(props);
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSinkTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
LOGGER.info("Setting task configurations with maxTasks {}", maxTasks);
List<Map<String, String>> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
configs.add(this.sinkConfig.originalsStrings());
}
return configs;
}
@Override
@Override
public ConfigDef config() {
return CosmosSinkConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
} |
info will be better, changed to use info | public void stop() {
LOGGER.debug("Kafka Cosmos sink connector {} is stopped.");
} | LOGGER.debug("Kafka Cosmos sink connector {} is stopped."); | public void stop() {
LOGGER.info("Kafka Cosmos sink connector {} is stopped.");
} | class CosmosSinkConnector extends SinkConnector {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSinkConnector.class);
private CosmosSinkConfig sinkConfig;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos sink connector");
this.sinkConfig = new CosmosSinkConfig(props);
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSinkTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
LOGGER.info("Setting task configurations with maxTasks {}", maxTasks);
List<Map<String, String>> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
configs.add(this.sinkConfig.originalsStrings());
}
return configs;
}
@Override
@Override
public ConfigDef config() {
return CosmosSinkConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
} | class CosmosSinkConnector extends SinkConnector {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSinkConnector.class);
private CosmosSinkConfig sinkConfig;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos sink connector");
this.sinkConfig = new CosmosSinkConfig(props);
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSinkTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
LOGGER.info("Setting task configurations with maxTasks {}", maxTasks);
List<Map<String, String>> configs = new ArrayList<>();
for (int i = 0; i < maxTasks; i++) {
configs.add(this.sinkConfig.originalsStrings());
}
return configs;
}
@Override
@Override
public ConfigDef config() {
return CosmosSinkConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
} |
nit: Use the `BlobOutputStream` as part of the try-with-resources block ```java try (CrcInputStream inputStream = new CrcInputStream(originalContent.getBlobContentHead(), options.getSize()); BlobOutputStream outputStream = appendBlobClient.getBlobOutputStream(true)) { ``` | protected void runInternal(Context span) throws IOException {
AppendBlobClient appendBlobClient = syncClient.getAppendBlobClient();
BlobOutputStream outputStream = appendBlobClient.getBlobOutputStream(true);
try (CrcInputStream inputStream = new CrcInputStream(originalContent.getBlobContentHead(), options.getSize())) {
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, bytesRead);
}
outputStream.close();
originalContent.checkMatch(inputStream.getContentInfo(), span).block();
} catch (Exception e) {
outputStream.close();
}
} | outputStream.close(); | protected void runInternal(Context span) throws IOException {
AppendBlobClient appendBlobClient = syncClient.getAppendBlobClient();
try (CrcInputStream inputStream = new CrcInputStream(originalContent.getBlobContentHead(), options.getSize());
BlobOutputStream outputStream = appendBlobClient.getBlobOutputStream(true)) {
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, bytesRead);
}
outputStream.close();
originalContent.checkMatch(inputStream.getContentInfo(), span).block();
}
} | class AppendBlobOutputStream extends BlobScenarioBase<StorageStressOptions> {
// Loggers are stateless; keep a single private static final instance per class.
// NOTE(review): tagged with BlockBlobOutputStream.class — confirm this isn't a
// copy/paste from the block-blob scenario rather than AppendBlobOutputStream.
private static final ClientLogger LOGGER = new ClientLogger(BlockBlobOutputStream.class);
private final OriginalContent originalContent = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncNoFaultClient;
private final BlobAsyncClient tempSetupBlobClient;
public AppendBlobOutputStream(StorageStressOptions options) {
super(options);
String blobName = generateBlobName();
String tempBlobName = generateBlobName();
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(blobName);
this.syncClient = getSyncContainerClient().getBlobClient(blobName);
this.tempSetupBlobClient = getAsyncContainerClientNoFault().getBlobAsyncClient(tempBlobName);
}
@Override
protected Mono<Void> runInternalAsync(Context span) {
    // The async blob client has no getBlobOutputStream(); this scenario is sync-only.
    return monoError(LOGGER, new RuntimeException("getBlobOutputStream() does not exist on the async client"));
}
@Override
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(asyncNoFaultClient.getAppendBlobAsyncClient().create())
.then(originalContent.setupBlob(tempSetupBlobClient, options.getSize()));
}
@Override
public Mono<Void> cleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.cleanupAsync());
}
} | class AppendBlobOutputStream extends BlobScenarioBase<StorageStressOptions> {
private static final ClientLogger LOGGER = new ClientLogger(BlockBlobOutputStream.class);
private final OriginalContent originalContent = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncNoFaultClient;
private final BlobAsyncClient tempSetupBlobClient;
public AppendBlobOutputStream(StorageStressOptions options) {
super(options);
String blobName = generateBlobName();
String tempBlobName = generateBlobName();
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(blobName);
this.syncClient = getSyncContainerClient().getBlobClient(blobName);
this.tempSetupBlobClient = getAsyncContainerClientNoFault().getBlobAsyncClient(tempBlobName);
}
@Override
protected Mono<Void> runInternalAsync(Context span) {
    // The async blob client has no getBlobOutputStream(); this scenario is sync-only.
    return monoError(LOGGER, new RuntimeException("getBlobOutputStream() does not exist on the async client"));
}
@Override
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(asyncNoFaultClient.getAppendBlobAsyncClient().create())
.then(originalContent.setupBlob(tempSetupBlobClient, options.getSize()));
}
@Override
public Mono<Void> cleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.cleanupAsync());
}
} |
Need one more change here to remove this close as the try-with-resource will handle that. Unless this close was being called to perform flush, if so, call flush here instead of close | protected void runInternal(Context span) throws IOException {
AppendBlobClient appendBlobClient = syncClient.getAppendBlobClient();
BlobOutputStream outputStream = appendBlobClient.getBlobOutputStream(true);
try (CrcInputStream inputStream = new CrcInputStream(originalContent.getBlobContentHead(), options.getSize())) {
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, bytesRead);
}
outputStream.close();
originalContent.checkMatch(inputStream.getContentInfo(), span).block();
} catch (Exception e) {
outputStream.close();
}
} | outputStream.close(); | protected void runInternal(Context span) throws IOException {
AppendBlobClient appendBlobClient = syncClient.getAppendBlobClient();
try (CrcInputStream inputStream = new CrcInputStream(originalContent.getBlobContentHead(), options.getSize());
BlobOutputStream outputStream = appendBlobClient.getBlobOutputStream(true)) {
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, bytesRead);
}
outputStream.close();
originalContent.checkMatch(inputStream.getContentInfo(), span).block();
}
} | class AppendBlobOutputStream extends BlobScenarioBase<StorageStressOptions> {
// Loggers are stateless; keep a single private static final instance per class.
// NOTE(review): tagged with BlockBlobOutputStream.class — confirm this isn't a
// copy/paste from the block-blob scenario rather than AppendBlobOutputStream.
private static final ClientLogger LOGGER = new ClientLogger(BlockBlobOutputStream.class);
private final OriginalContent originalContent = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncNoFaultClient;
private final BlobAsyncClient tempSetupBlobClient;
public AppendBlobOutputStream(StorageStressOptions options) {
super(options);
String blobName = generateBlobName();
String tempBlobName = generateBlobName();
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(blobName);
this.syncClient = getSyncContainerClient().getBlobClient(blobName);
this.tempSetupBlobClient = getAsyncContainerClientNoFault().getBlobAsyncClient(tempBlobName);
}
@Override
protected Mono<Void> runInternalAsync(Context span) {
    // The async blob client has no getBlobOutputStream(); this scenario is sync-only.
    return monoError(LOGGER, new RuntimeException("getBlobOutputStream() does not exist on the async client"));
}
@Override
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(asyncNoFaultClient.getAppendBlobAsyncClient().create())
.then(originalContent.setupBlob(tempSetupBlobClient, options.getSize()));
}
@Override
public Mono<Void> cleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.cleanupAsync());
}
} | class AppendBlobOutputStream extends BlobScenarioBase<StorageStressOptions> {
private static final ClientLogger LOGGER = new ClientLogger(BlockBlobOutputStream.class);
private final OriginalContent originalContent = new OriginalContent();
private final BlobClient syncClient;
private final BlobAsyncClient asyncNoFaultClient;
private final BlobAsyncClient tempSetupBlobClient;
public AppendBlobOutputStream(StorageStressOptions options) {
super(options);
String blobName = generateBlobName();
String tempBlobName = generateBlobName();
this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(blobName);
this.syncClient = getSyncContainerClient().getBlobClient(blobName);
this.tempSetupBlobClient = getAsyncContainerClientNoFault().getBlobAsyncClient(tempBlobName);
}
@Override
protected Mono<Void> runInternalAsync(Context span) {
    // The async blob client has no getBlobOutputStream(); this scenario is sync-only.
    return monoError(LOGGER, new RuntimeException("getBlobOutputStream() does not exist on the async client"));
}
@Override
public Mono<Void> setupAsync() {
return super.setupAsync()
.then(asyncNoFaultClient.getAppendBlobAsyncClient().create())
.then(originalContent.setupBlob(tempSetupBlobClient, options.getSize()));
}
@Override
public Mono<Void> cleanupAsync() {
return asyncNoFaultClient.delete()
.then(super.cleanupAsync());
}
} |
Should be able to just use `BinaryData.toByteBuffer()` | public static ContentInfo fromBinaryData(BinaryData data) {
// toByteBuffer() exposes the content without the extra full copy toBytes() makes.
ByteBuffer bb = data.toByteBuffer();
long length = bb.remaining();
ByteBuffer head = ByteBuffer.allocate(1024);
CRC32 crc = new CRC32();
if (length > 0) {
    byte[] content = new byte[bb.remaining()];
    // Read through a duplicate so the caller's buffer position stays untouched.
    bb.duplicate().get(content);
    // Capture at most the first 1 KiB as the head sample.
    head.put(content, 0, (int) Math.min(head.remaining(), length));
    crc.update(content, 0, content.length);
}
return new ContentInfo(crc.getValue(), length, head);
} | ByteBuffer bb = ByteBuffer.wrap(data.toBytes()); | public static ContentInfo fromBinaryData(BinaryData data) {
ByteBuffer bb = data.toByteBuffer();
AtomicLong length = new AtomicLong(0);
CRC32 crc = new CRC32();
ByteBuffer head = ByteBuffer.allocate(1024);
processBuffer(bb, crc, length, head);
return new ContentInfo(crc.getValue(), length.get(), (ByteBuffer) head.flip());
} | class ContentInfo {
private final long length;
private final long crc;
private final ByteBuffer head;
ContentInfo(long crc, long length, ByteBuffer head) {
this.crc = crc;
this.length = length;
this.head = head;
}
public static Mono<ContentInfo> fromFluxByteBuffer(Flux<ByteBuffer> data) {
AtomicLong length = new AtomicLong(0);
ByteBuffer head = ByteBuffer.allocate(1024);
Mono<Long> crcMono = data
.reduce(new CRC32(),
(crc, bb) -> {
int remaining = bb.remaining();
length.getAndAdd(remaining);
if (head.hasRemaining()) {
int toRead = Math.min(head.remaining(), remaining);
byte[] temp = new byte[toRead];
bb.get(temp, 0, toRead);
head.put(temp);
}
bb.rewind();
byte[] crcTemp = new byte[remaining];
bb.get(crcTemp);
crc.update(crcTemp, 0, remaining);
return crc;
})
.map(CRC32::getValue);
return crcMono.map(crc -> new ContentInfo(crc, length.get(), head));
}
public long getLength() {
return length;
}
public long getCrc() {
return crc;
}
public ByteBuffer getHead() {
return head;
}
} | class ContentInfo {
private final long length;
private final long crc;
private final ByteBuffer head;
ContentInfo(long crc, long length, ByteBuffer head) {
this.crc = crc;
this.length = length;
this.head = head;
}
private static void processBuffer(ByteBuffer bb, CRC32 crc, AtomicLong length, ByteBuffer head) {
int remaining = bb.remaining();
length.getAndAdd(remaining);
if (head.hasRemaining()) {
int toRead = Math.min(head.remaining(), remaining);
byte[] temp = new byte[toRead];
bb.get(temp, 0, toRead);
head.put(temp);
}
bb.rewind();
byte[] crcTemp = new byte[bb.remaining()];
bb.get(crcTemp);
crc.update(crcTemp, 0, crcTemp.length);
}
public static Mono<ContentInfo> fromFluxByteBuffer(Flux<ByteBuffer> data) {
AtomicLong length = new AtomicLong(0);
CRC32 crc = new CRC32();
ByteBuffer head = ByteBuffer.allocate(1024);
return data
.doOnNext(bb -> processBuffer(bb.duplicate(), crc, length, head))
.then(Mono.fromCallable(() -> new ContentInfo(crc.getValue(), length.get(), (ByteBuffer) head.flip())));
}
public long getLength() {
return length;
}
public long getCrc() {
return crc;
}
public ByteBuffer getHead() {
return head;
}
} |
Might be easier to just use `ByteBuffer.duplicate` in the code block copying to `head`. | public static ContentInfo fromBinaryData(BinaryData data) {
ByteBuffer bb = ByteBuffer.wrap(data.toBytes());
long length = bb.remaining();
ByteBuffer head = ByteBuffer.allocate(1024);
CRC32 crc = new CRC32();
if (length > 0) {
if (head.remaining() > 0) {
int toRead = Math.min(head.remaining(), bb.remaining());
byte[] temp = new byte[toRead];
bb.get(temp, 0, toRead);
head.put(temp);
}
bb.rewind();
byte[] crcTemp = new byte[bb.remaining()];
bb.get(crcTemp);
crc.update(crcTemp, 0, crcTemp.length);
}
return new ContentInfo(crc.getValue(), length, head);
} | bb.rewind(); | public static ContentInfo fromBinaryData(BinaryData data) {
ByteBuffer bb = data.toByteBuffer();
AtomicLong length = new AtomicLong(0);
CRC32 crc = new CRC32();
ByteBuffer head = ByteBuffer.allocate(1024);
processBuffer(bb, crc, length, head);
return new ContentInfo(crc.getValue(), length.get(), (ByteBuffer) head.flip());
} | class ContentInfo {
private final long length;
private final long crc;
private final ByteBuffer head;
ContentInfo(long crc, long length, ByteBuffer head) {
this.crc = crc;
this.length = length;
this.head = head;
}
public static Mono<ContentInfo> fromFluxByteBuffer(Flux<ByteBuffer> data) {
AtomicLong length = new AtomicLong(0);
ByteBuffer head = ByteBuffer.allocate(1024);
Mono<Long> crcMono = data
.reduce(new CRC32(),
(crc, bb) -> {
int remaining = bb.remaining();
length.getAndAdd(remaining);
if (head.hasRemaining()) {
int toRead = Math.min(head.remaining(), remaining);
byte[] temp = new byte[toRead];
bb.get(temp, 0, toRead);
head.put(temp);
}
bb.rewind();
byte[] crcTemp = new byte[remaining];
bb.get(crcTemp);
crc.update(crcTemp, 0, remaining);
return crc;
})
.map(CRC32::getValue);
return crcMono.map(crc -> new ContentInfo(crc, length.get(), head));
}
public long getLength() {
return length;
}
public long getCrc() {
return crc;
}
public ByteBuffer getHead() {
return head;
}
} | class ContentInfo {
private final long length;
private final long crc;
private final ByteBuffer head;
ContentInfo(long crc, long length, ByteBuffer head) {
this.crc = crc;
this.length = length;
this.head = head;
}
private static void processBuffer(ByteBuffer bb, CRC32 crc, AtomicLong length, ByteBuffer head) {
int remaining = bb.remaining();
length.getAndAdd(remaining);
if (head.hasRemaining()) {
int toRead = Math.min(head.remaining(), remaining);
byte[] temp = new byte[toRead];
bb.get(temp, 0, toRead);
head.put(temp);
}
bb.rewind();
byte[] crcTemp = new byte[bb.remaining()];
bb.get(crcTemp);
crc.update(crcTemp, 0, crcTemp.length);
}
public static Mono<ContentInfo> fromFluxByteBuffer(Flux<ByteBuffer> data) {
AtomicLong length = new AtomicLong(0);
CRC32 crc = new CRC32();
ByteBuffer head = ByteBuffer.allocate(1024);
return data
.doOnNext(bb -> processBuffer(bb.duplicate(), crc, length, head))
.then(Mono.fromCallable(() -> new ContentInfo(crc.getValue(), length.get(), (ByteBuffer) head.flip())));
}
public long getLength() {
return length;
}
public long getCrc() {
return crc;
}
public ByteBuffer getHead() {
return head;
}
} |
nit: do we still need the comment here? | public static Mono<ContentInfo> fromFluxByteBuffer(Flux<ByteBuffer> data) {
// Aggregates the stream in a single pass: total byte count, CRC32 over all
// bytes, and a copy of the first 1024 bytes ("head") for later spot-checking.
AtomicLong length = new AtomicLong(0);
ByteBuffer head = ByteBuffer.allocate(1024);
// Reduce so one CRC32 instance accumulates every emitted buffer in order.
Mono<Long> crcMono = data
.reduce(new CRC32(),
(crc, bb) -> {
length.getAndAdd(bb.remaining());
if (head.hasRemaining())
{
// Copy into 'head' through a duplicate so the CRC update below still
// sees the buffer's original position.
ByteBuffer dup = bb.duplicate();
while (head.hasRemaining() && dup.hasRemaining()) {
head.put(dup.get());
}
}
// Consumes 'bb' to its limit; safe because nothing downstream re-reads it.
crc.update(bb);
return crc;
})
.map(CRC32::getValue);
// NOTE(review): 'head' is returned without flip(); consumers presumably flip
// it or read absolutely — confirm against checkMatch usage.
return crcMono.map(crc -> new ContentInfo(crc, length.get(), head));
}
AtomicLong length = new AtomicLong(0);
CRC32 crc = new CRC32();
ByteBuffer head = ByteBuffer.allocate(1024);
return data
.doOnNext(bb -> processBuffer(bb.duplicate(), crc, length, head))
.then(Mono.fromCallable(() -> new ContentInfo(crc.getValue(), length.get(), (ByteBuffer) head.flip())));
} | class ContentInfo {
private final long length;
private final long crc;
private final ByteBuffer head;
ContentInfo(long crc, long length, ByteBuffer head) {
this.crc = crc;
this.length = length;
this.head = head;
}
public static ContentInfo fromBinaryData(BinaryData data) {
ByteBuffer bb = data.toByteBuffer();
long length = bb.remaining();
ByteBuffer head = ByteBuffer.allocate(1024);
CRC32 crc = new CRC32();
if (length > 0) {
if (head.remaining() > 0) {
int toRead = Math.min(head.remaining(), bb.remaining());
byte[] temp = new byte[toRead];
ByteBuffer dup = bb.duplicate();
dup.get(temp);
head.put(temp);
}
crc.update(bb);
}
return new ContentInfo(crc.getValue(), length, head);
}
public long getLength() {
return length;
}
public long getCrc() {
return crc;
}
public ByteBuffer getHead() {
return head;
}
} | class ContentInfo {
private final long length;
private final long crc;
private final ByteBuffer head;
ContentInfo(long crc, long length, ByteBuffer head) {
this.crc = crc;
this.length = length;
this.head = head;
}
private static void processBuffer(ByteBuffer bb, CRC32 crc, AtomicLong length, ByteBuffer head) {
int remaining = bb.remaining();
length.getAndAdd(remaining);
if (head.hasRemaining()) {
int toRead = Math.min(head.remaining(), remaining);
byte[] temp = new byte[toRead];
bb.get(temp, 0, toRead);
head.put(temp);
}
bb.rewind();
byte[] crcTemp = new byte[bb.remaining()];
bb.get(crcTemp);
crc.update(crcTemp, 0, crcTemp.length);
}
public static ContentInfo fromBinaryData(BinaryData data) {
ByteBuffer bb = data.toByteBuffer();
AtomicLong length = new AtomicLong(0);
CRC32 crc = new CRC32();
ByteBuffer head = ByteBuffer.allocate(1024);
processBuffer(bb, crc, length, head);
return new ContentInfo(crc.getValue(), length.get(), (ByteBuffer) head.flip());
}
public long getLength() {
return length;
}
public long getCrc() {
return crc;
}
public ByteBuffer getHead() {
return head;
}
} | |
I don't think hardcoding is helpful - it's really easy to forget to update it and would lead to more confusion at the results. If we can't find a way to get a real package version, leaving it empty or removing the attribute would be a better choice | public TelemetryHelper(Class<?> scenarioClass) {
this.scenarioName = scenarioClass.getName();
this.tracer = OTEL.getTracer(scenarioName);
this.meter = OTEL.getMeter(scenarioName);
this.logger = new ClientLogger(scenarioName);
this.runDuration = meter.histogramBuilder("test.run.duration")
.setUnit("s")
.build();
this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
this.packageType = scenarioClass.getPackage().toString();
if (packageType.contains("blob")) {
this.packageVersion = "12.25.1";
} else if (packageType.contains("datalake")) {
this.packageVersion = "12.18.1";
} else if (packageType.contains(("share"))) {
this.packageVersion = "12.21.1";
} else {
this.packageVersion = null;
}
} | this.packageVersion = "12.25.1"; | public TelemetryHelper(Class<?> scenarioClass) {
this.scenarioName = scenarioClass.getName();
this.tracer = OTEL.getTracer(scenarioName);
this.meter = OTEL.getMeter(scenarioName);
this.logger = new ClientLogger(scenarioName);
this.runDuration = meter.histogramBuilder("test.run.duration")
.setUnit("s")
.build();
this.commonAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName);
this.canceledAttributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, "cancelled");
this.packageType = scenarioClass.getPackage().toString();
} | class TelemetryHelper {
private final Tracer tracer;
private final ClientLogger logger;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private static final OpenTelemetry OTEL;
private final String scenarioName;
private final Meter meter;
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
private final String packageType;
private final String packageVersion;
private final AtomicLong successfulRuns = new AtomicLong();
private final AtomicLong failedRuns = new AtomicLong();
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
* Creates an instance of telemetry helper.
* @param scenarioClass the scenario class
*/
/**
* Initializes telemetry helper: sets up Azure Monitor exporter, enables JVM metrics collection.
*/
public static OpenTelemetry init() {
if (OTEL != null) {
return OTEL;
}
System.setProperty("otel.java.global-autoconfigure.enabled", "true");
AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
if (applicationInsightsConnectionString == null) {
System.setProperty("otel.traces.exporter", "none");
System.setProperty("otel.metrics.exporter", "none");
System.setProperty("otel.logs.exporter", "none");
} else {
new AzureMonitorExporterBuilder()
.connectionString(applicationInsightsConnectionString)
.install(sdkBuilder);
}
OpenTelemetry otel = sdkBuilder
.addSamplerCustomizer((sampler, props) -> new Sampler() {
@Override
public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
return SamplingResult.recordAndSample();
}
return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
}
@Override
public String getDescription() {
return sampler.getDescription();
}
})
.setResultAsGlobal()
.build()
.getOpenTelemetrySdk();
Classes.registerObservers(otel);
Cpu.registerObservers(otel);
MemoryPools.registerObservers(otel);
Threads.registerObservers(otel);
GarbageCollector.registerObservers(otel);
OpenTelemetryAppender.install(otel);
return otel;
}
/**
* Instruments a runnable: records runnable duration along with the status (success, error, cancellation),
* @param oneRun the runnable to instrument
*/
@SuppressWarnings("try")
public void instrumentRun(ThrowingFunction oneRun) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder("run").startSpan();
    try (Scope s = span.makeCurrent()) {
        com.azure.core.util.Context ctx = new com.azure.core.util.Context(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, io.opentelemetry.context.Context.current());
        oneRun.run(ctx);
        trackSuccess(start, span);
    } catch (Throwable e) {
        if (e instanceof InterruptedException) {
            // Preserve the interrupt status for callers further up the stack.
            Thread.currentThread().interrupt();
        }
        // getMessage() may be null (e.g. a bare InterruptedException); the old
        // code dereferenced it unconditionally and could NPE before classifying
        // the failure. Check the exception types first, then the message.
        if (e instanceof InterruptedException || e instanceof TimeoutException
            || (e.getMessage() != null && e.getMessage().contains("Timeout on blocking read"))) {
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span);
        }
    }
}
/**
* Instruments a Mono: records mono duration along with the status (success, error, cancellation),
* @param runAsync the mono to instrument
* @return the instrumented mono
*/
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Function<com.azure.core.util.Context, Mono<Void>> runAsync) {
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder("runAsync").startSpan();
try (Scope s = span.makeCurrent()) {
com.azure.core.util.Context ctx = new com.azure.core.util.Context(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, io.opentelemetry.context.Context.current());
return runAsync.apply(ctx).doOnError(e -> trackFailure(start, e, span))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, io.opentelemetry.context.Context.current()));
}
});
}
private void trackSuccess(Instant start, Span span) {
logger.atVerbose()
.addKeyValue("traceId", span.getSpanContext().getTraceId())
.addKeyValue("status", "success")
.log("run ended");
runDuration.record(getDuration(start), commonAttributes);
successfulRuns.incrementAndGet();
span.end();
logger.info("track success");
}
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
private void trackFailure(Instant start, Throwable e, Span span) {
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, unwrapped.getClass().getName());
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
logger.atError()
.addKeyValue("error.type", errorType)
.addKeyValue("traceId", span.getSpanContext().getTraceId())
.log("run ended", unwrapped);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
failedRuns.incrementAndGet();
logger.info("track failure");
span.end();
}
/**
* Records an event representing the start of a test along with test options.
* @param options test parameters
*/
public void recordStart(StorageStressOptions options) {
String storagePackageVersion = packageType + "/" + packageVersion;
System.out.println("storagePackageVersion: " + storagePackageVersion);
Span before = startSampledInSpan("before run");
before.setAttribute(AttributeKey.longKey("durationSec"), options.getDuration());
before.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
before.setAttribute(AttributeKey.longKey("concurrency"), options.getParallel());
before.setAttribute(AttributeKey.stringKey("storagePackageVersion"), storagePackageVersion);
before.setAttribute(AttributeKey.booleanKey("sync"), options.isSync());
before.setAttribute(AttributeKey.longKey("payloadSize"), options.getSize());
before.setAttribute(AttributeKey.stringKey("hostname"), System.getenv().get("HOSTNAME"));
before.setAttribute(AttributeKey.booleanKey("faultInjectionForDownloads"), options.isFaultInjectionEnabledForDownloads());
before.setAttribute(AttributeKey.booleanKey("faultInjectionForUploads"), options.isFaultInjectionEnabledForUploads());
before.setAttribute(AttributeKey.stringKey("httpClientProvider"), options.getHttpClient().toString());
before.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
before.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
before.end();
logger.atInfo()
.addKeyValue("duration", options.getDuration())
.addKeyValue("payloadSize", options.getSize())
.addKeyValue("concurrency", options.getParallel())
.addKeyValue("faultInjectionForDownloads", options.isFaultInjectionEnabledForDownloads())
.addKeyValue("faultInjectionForUploads", options.isFaultInjectionEnabledForUploads())
.addKeyValue("storagePackageVersion", storagePackageVersion)
.addKeyValue("sync", options.isSync())
.addKeyValue("scenarioName", scenarioName)
.log("starting test");
logger.log(LogLevel.INFORMATIONAL, () -> "starting test");
}
/**
* Records an event representing the end of the test.
* @param startTime the start time of the test
*/
public void recordEnd(Instant startTime) {
Span after = startSampledInSpan("after run");
after.setAttribute(AttributeKey.longKey("succeeded"), successfulRuns.get());
after.setAttribute(AttributeKey.longKey("failed"), failedRuns.get());
after.setAttribute(AttributeKey.longKey("durationMs"), Instant.now().toEpochMilli() - startTime.toEpochMilli());
after.end();
logger.atInfo()
.addKeyValue("scenarioName", scenarioName)
.addKeyValue("succeeded", successfulRuns.get())
.addKeyValue("failed", failedRuns.get())
.log("test finished");
}
private Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
private static double getDuration(Instant start) {
return Math.max(0d, Instant.now().toEpochMilli() - start.toEpochMilli()) / 1000d;
}
@FunctionalInterface
public interface ThrowingFunction {
void run(com.azure.core.util.Context context) throws Exception;
}
} | class TelemetryHelper {
private final Tracer tracer;
private final ClientLogger logger;
private static final AttributeKey<String> SCENARIO_NAME_ATTRIBUTE = AttributeKey.stringKey("scenario_name");
private static final AttributeKey<String> ERROR_TYPE_ATTRIBUTE = AttributeKey.stringKey("error.type");
private static final AttributeKey<Boolean> SAMPLE_IN_ATTRIBUTE = AttributeKey.booleanKey("sample.in");
private static final OpenTelemetry OTEL;
private final String scenarioName;
private final Meter meter;
private final DoubleHistogram runDuration;
private final Attributes commonAttributes;
private final Attributes canceledAttributes;
private final String packageType;
private final AtomicLong successfulRuns = new AtomicLong();
private final AtomicLong failedRuns = new AtomicLong();
static {
Schedulers.enableMetrics();
OTEL = init();
}
/**
* Creates an instance of telemetry helper.
* @param scenarioClass the scenario class
*/
/**
* Initializes telemetry helper: sets up Azure Monitor exporter, enables JVM metrics collection.
*/
public static OpenTelemetry init() {
if (OTEL != null) {
return OTEL;
}
System.setProperty("otel.java.global-autoconfigure.enabled", "true");
AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder = AutoConfiguredOpenTelemetrySdk.builder();
String applicationInsightsConnectionString = System.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING");
if (applicationInsightsConnectionString == null) {
System.setProperty("otel.traces.exporter", "none");
System.setProperty("otel.metrics.exporter", "none");
System.setProperty("otel.logs.exporter", "none");
} else {
new AzureMonitorExporterBuilder()
.connectionString(applicationInsightsConnectionString)
.install(sdkBuilder);
}
OpenTelemetry otel = sdkBuilder
.addSamplerCustomizer((sampler, props) -> new Sampler() {
@Override
public SamplingResult shouldSample(Context parentContext, String traceId, String name, SpanKind spanKind, Attributes attributes, List<LinkData> parentLinks) {
if (Boolean.TRUE.equals(attributes.get(SAMPLE_IN_ATTRIBUTE))) {
return SamplingResult.recordAndSample();
}
return sampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks);
}
@Override
public String getDescription() {
return sampler.getDescription();
}
})
.setResultAsGlobal()
.build()
.getOpenTelemetrySdk();
Classes.registerObservers(otel);
Cpu.registerObservers(otel);
MemoryPools.registerObservers(otel);
Threads.registerObservers(otel);
GarbageCollector.registerObservers(otel);
OpenTelemetryAppender.install(otel);
return otel;
}
/**
* Instruments a runnable: records runnable duration along with the status (success, error, cancellation),
* @param oneRun the runnable to instrument
*/
@SuppressWarnings("try")
public void instrumentRun(ThrowingFunction oneRun) {
    Instant start = Instant.now();
    Span span = tracer.spanBuilder("run").startSpan();
    try (Scope s = span.makeCurrent()) {
        com.azure.core.util.Context ctx = new com.azure.core.util.Context(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, io.opentelemetry.context.Context.current());
        oneRun.run(ctx);
        trackSuccess(start, span);
    } catch (Throwable e) {
        if (e instanceof InterruptedException) {
            // Preserve the interrupt status for callers further up the stack.
            Thread.currentThread().interrupt();
        }
        // getMessage() may be null (e.g. a bare InterruptedException); the old
        // code dereferenced it unconditionally and could NPE before classifying
        // the failure. Check the exception types first, then the message.
        if (e instanceof InterruptedException || e instanceof TimeoutException
            || (e.getMessage() != null && e.getMessage().contains("Timeout on blocking read"))) {
            trackCancellation(start, span);
        } else {
            trackFailure(start, e, span);
        }
    }
}
/**
* Instruments a Mono: records mono duration along with the status (success, error, cancellation),
* @param runAsync the mono to instrument
* @return the instrumented mono
*/
@SuppressWarnings("try")
public Mono<Void> instrumentRunAsync(Function<com.azure.core.util.Context, Mono<Void>> runAsync) {
return Mono.defer(() -> {
Instant start = Instant.now();
Span span = tracer.spanBuilder("runAsync").startSpan();
try (Scope s = span.makeCurrent()) {
com.azure.core.util.Context ctx = new com.azure.core.util.Context(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, io.opentelemetry.context.Context.current());
return runAsync.apply(ctx).doOnError(e -> trackFailure(start, e, span))
.doOnCancel(() -> trackCancellation(start, span))
.doOnSuccess(v -> trackSuccess(start, span))
.contextWrite(reactor.util.context.Context.of(com.azure.core.util.tracing.Tracer.PARENT_TRACE_CONTEXT_KEY, io.opentelemetry.context.Context.current()));
}
});
}
private void trackSuccess(Instant start, Span span) {
logger.atVerbose()
.addKeyValue("traceId", span.getSpanContext().getTraceId())
.addKeyValue("status", "success")
.log("run ended");
runDuration.record(getDuration(start), commonAttributes);
successfulRuns.incrementAndGet();
span.end();
logger.info("track success");
}
private void trackCancellation(Instant start, Span span) {
logger.atWarning()
.addKeyValue("error.type", "cancelled")
.log("run ended");
runDuration.record(getDuration(start), canceledAttributes);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, "cancelled");
span.setStatus(StatusCode.ERROR);
span.end();
}
private void trackFailure(Instant start, Throwable e, Span span) {
Throwable unwrapped = Exceptions.unwrap(e);
span.recordException(unwrapped);
span.setAttribute(ERROR_TYPE_ATTRIBUTE, unwrapped.getClass().getName());
span.setStatus(StatusCode.ERROR, unwrapped.getMessage());
String errorType = unwrapped.getClass().getName();
logger.atError()
.addKeyValue("error.type", errorType)
.addKeyValue("traceId", span.getSpanContext().getTraceId())
.log("run ended", unwrapped);
Attributes attributes = Attributes.of(SCENARIO_NAME_ATTRIBUTE, scenarioName, ERROR_TYPE_ATTRIBUTE, errorType);
runDuration.record(getDuration(start), attributes);
failedRuns.incrementAndGet();
logger.info("track failure");
span.end();
}
/**
* Records an event representing the start of a test along with test options.
* @param options test parameters
*/
public void recordStart(StorageStressOptions options) {
Span before = startSampledInSpan("before run");
before.setAttribute(AttributeKey.longKey("durationSec"), options.getDuration());
before.setAttribute(AttributeKey.stringKey("scenarioName"), scenarioName);
before.setAttribute(AttributeKey.longKey("concurrency"), options.getParallel());
before.setAttribute(AttributeKey.stringKey("storagePackageVersion"), this.packageType);
before.setAttribute(AttributeKey.booleanKey("sync"), options.isSync());
before.setAttribute(AttributeKey.longKey("payloadSize"), options.getSize());
before.setAttribute(AttributeKey.stringKey("hostname"), System.getenv().get("HOSTNAME"));
before.setAttribute(AttributeKey.booleanKey("faultInjectionForDownloads"), options.isFaultInjectionEnabledForDownloads());
before.setAttribute(AttributeKey.booleanKey("faultInjectionForUploads"), options.isFaultInjectionEnabledForUploads());
before.setAttribute(AttributeKey.stringKey("httpClientProvider"), options.getHttpClient().toString());
before.setAttribute(AttributeKey.stringKey("jreVersion"), System.getProperty("java.version"));
before.setAttribute(AttributeKey.stringKey("jreVendor"), System.getProperty("java.vendor"));
before.end();
logger.atInfo()
.addKeyValue("duration", options.getDuration())
.addKeyValue("payloadSize", options.getSize())
.addKeyValue("concurrency", options.getParallel())
.addKeyValue("faultInjectionForDownloads", options.isFaultInjectionEnabledForDownloads())
.addKeyValue("faultInjectionForUploads", options.isFaultInjectionEnabledForUploads())
.addKeyValue("storagePackageVersion", this.packageType)
.addKeyValue("sync", options.isSync())
.addKeyValue("scenarioName", scenarioName)
.log("starting test");
logger.log(LogLevel.INFORMATIONAL, () -> "starting test");
}
/**
* Records an event representing the end of the test.
* @param startTime the start time of the test
*/
public void recordEnd(Instant startTime) {
Span after = startSampledInSpan("after run");
after.setAttribute(AttributeKey.longKey("succeeded"), successfulRuns.get());
after.setAttribute(AttributeKey.longKey("failed"), failedRuns.get());
after.setAttribute(AttributeKey.longKey("durationMs"), Instant.now().toEpochMilli() - startTime.toEpochMilli());
after.end();
logger.atInfo()
.addKeyValue("scenarioName", scenarioName)
.addKeyValue("succeeded", successfulRuns.get())
.addKeyValue("failed", failedRuns.get())
.log("test finished");
}
private Span startSampledInSpan(String name) {
return tracer.spanBuilder(name)
.setAttribute(SAMPLE_IN_ATTRIBUTE, true)
.startSpan();
}
private static double getDuration(Instant start) {
return Math.max(0d, Instant.now().toEpochMilli() - start.toEpochMilli()) / 1000d;
}
@FunctionalInterface
public interface ThrowingFunction {
void run(com.azure.core.util.Context context) throws Exception;
}
} |
This should continue to use `serializer` as that is meant to handle the custom types that may be provided. | private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
ByteArrayOutputStream stream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(stream);
writer.write
writer.close();
byte[] bytes = stream.toByteArray();
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
} | Object currentLog = iterator.next(); | private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
byte[] bytes = serializer.serializeToBytes(currentLog);
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
} | class Batcher implements Iterator<LogsIngestionRequest> {
private static final ClientLogger LOGGER = new ClientLogger(Batcher.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final ObjectSerializer serializer;
private final int concurrency;
private final Iterator<Object> iterator;
private long currentBatchSize;
private List<String> serializedLogs;
private List<Object> originalLogsRequest;
public Batcher(LogsUploadOptions options, Iterable<Object> logs) {
this.serializer = getSerializer(options);
this.concurrency = getConcurrency(options);
this.serializedLogs = new ArrayList<>();
this.originalLogsRequest = new ArrayList<>();
this.iterator = logs.iterator();
}
/**
* Checks if there are more logs to batch. This method is not thread safe!
*
* When used concurrently, it should be synchronized along with {@link Batcher
*
* <pre>{@code
* synchronized (batcher) {
* if (batcher.hasNext()) {
* request = batcher.next();
* }
* }
* }</pre>
*/
@Override
public boolean hasNext() {
return iterator.hasNext() || currentBatchSize > 0;
}
/**
* Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe!
*
* Returns null when complete.
*/
@Override
public LogsIngestionRequest next() {
try {
return nextInternal();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1.
*/
public Stream<LogsIngestionRequest> toStream() {
if (concurrency == 1) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false);
}
return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true);
}
/**
* Creates flux with requests.
*/
public Flux<LogsIngestionRequest> toFlux() {
return Flux.create(emitter -> {
try {
while (hasNext()) {
LogsIngestionRequest next = nextInternal();
if (next != null) {
emitter.next(next);
}
}
} catch (IOException ex) {
emitter.error(ex);
}
emitter.complete();
});
}
/*private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
byte[] bytes = serializer.serializeToBytes(currentLog);
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
}*/
private LogsIngestionRequest createRequest(boolean last) throws IOException {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream);
try {
writer.writeStartArray();
for (String log : serializedLogs) {
writer.writeRawValue(log);
}
writer.writeEndArray();
writer.close();
byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody);
} finally {
if (!last) {
originalLogsRequest = new ArrayList<>();
serializedLogs.clear();
}
}
}
private static ObjectSerializer getSerializer(LogsUploadOptions options) {
if (options != null && options.getObjectSerializer() != null) {
return options.getObjectSerializer();
}
return DEFAULT_SERIALIZER;
}
} | class Batcher implements Iterator<LogsIngestionRequest> {
private static final ClientLogger LOGGER = new ClientLogger(Batcher.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final ObjectSerializer serializer;
private final int concurrency;
private final Iterator<Object> iterator;
private long currentBatchSize;
private List<String> serializedLogs;
private List<Object> originalLogsRequest;
public Batcher(LogsUploadOptions options, Iterable<Object> logs) {
this.serializer = getSerializer(options);
this.concurrency = getConcurrency(options);
this.serializedLogs = new ArrayList<>();
this.originalLogsRequest = new ArrayList<>();
this.iterator = logs.iterator();
}
/**
* Checks if there are more logs to batch. This method is not thread safe!
*
* When used concurrently, it should be synchronized along with {@link Batcher
*
* <pre>{@code
* synchronized (batcher) {
* if (batcher.hasNext()) {
* request = batcher.next();
* }
* }
* }</pre>
*/
@Override
public boolean hasNext() {
return iterator.hasNext() || currentBatchSize > 0;
}
/**
* Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe!
*
* Returns null when complete.
*/
@Override
public LogsIngestionRequest next() {
try {
return nextInternal();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1.
*/
public Stream<LogsIngestionRequest> toStream() {
if (concurrency == 1) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false);
}
return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true);
}
/**
* Creates flux with requests.
*/
public Flux<LogsIngestionRequest> toFlux() {
return Flux.create(emitter -> {
try {
while (hasNext()) {
LogsIngestionRequest next = nextInternal();
if (next != null) {
emitter.next(next);
}
}
} catch (IOException ex) {
emitter.error(ex);
}
emitter.complete();
});
}
private LogsIngestionRequest createRequest(boolean last) throws IOException {
try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream)) {
writer.writeStartArray();
for (String log : serializedLogs) {
writer.writeRawValue(log);
}
writer.writeEndArray();
writer.flush();
byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody);
} finally {
if (!last) {
originalLogsRequest = new ArrayList<>();
serializedLogs.clear();
}
}
}
private static ObjectSerializer getSerializer(LogsUploadOptions options) {
if (options != null && options.getObjectSerializer() != null) {
return options.getObjectSerializer();
}
return DEFAULT_SERIALIZER;
}
} |
This should be turned into a try-with-resources just in case writing fails. Right now, and with the code being replaced, the resource could be left unclosed. ``` try (JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream)) { ``` | private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
ByteArrayOutputStream stream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(stream);
writer.write
writer.close();
byte[] bytes = stream.toByteArray();
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
} | return result; | private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
byte[] bytes = serializer.serializeToBytes(currentLog);
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
} | class Batcher implements Iterator<LogsIngestionRequest> {
private static final ClientLogger LOGGER = new ClientLogger(Batcher.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final ObjectSerializer serializer;
private final int concurrency;
private final Iterator<Object> iterator;
private long currentBatchSize;
private List<String> serializedLogs;
private List<Object> originalLogsRequest;
public Batcher(LogsUploadOptions options, Iterable<Object> logs) {
this.serializer = getSerializer(options);
this.concurrency = getConcurrency(options);
this.serializedLogs = new ArrayList<>();
this.originalLogsRequest = new ArrayList<>();
this.iterator = logs.iterator();
}
/**
* Checks if there are more logs to batch. This method is not thread safe!
*
* When used concurrently, it should be synchronized along with {@link Batcher
*
* <pre>{@code
* synchronized (batcher) {
* if (batcher.hasNext()) {
* request = batcher.next();
* }
* }
* }</pre>
*/
@Override
public boolean hasNext() {
return iterator.hasNext() || currentBatchSize > 0;
}
/**
* Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe!
*
* Returns null when complete.
*/
@Override
public LogsIngestionRequest next() {
try {
return nextInternal();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1.
*/
public Stream<LogsIngestionRequest> toStream() {
if (concurrency == 1) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false);
}
return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true);
}
/**
* Creates flux with requests.
*/
public Flux<LogsIngestionRequest> toFlux() {
return Flux.create(emitter -> {
try {
while (hasNext()) {
LogsIngestionRequest next = nextInternal();
if (next != null) {
emitter.next(next);
}
}
} catch (IOException ex) {
emitter.error(ex);
}
emitter.complete();
});
}
/*private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
byte[] bytes = serializer.serializeToBytes(currentLog);
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
}*/
private LogsIngestionRequest createRequest(boolean last) throws IOException {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream);
try {
writer.writeStartArray();
for (String log : serializedLogs) {
writer.writeRawValue(log);
}
writer.writeEndArray();
writer.close();
byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody);
} finally {
if (!last) {
originalLogsRequest = new ArrayList<>();
serializedLogs.clear();
}
}
}
private static ObjectSerializer getSerializer(LogsUploadOptions options) {
if (options != null && options.getObjectSerializer() != null) {
return options.getObjectSerializer();
}
return DEFAULT_SERIALIZER;
}
} | class Batcher implements Iterator<LogsIngestionRequest> {
private static final ClientLogger LOGGER = new ClientLogger(Batcher.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final ObjectSerializer serializer;
private final int concurrency;
private final Iterator<Object> iterator;
private long currentBatchSize;
private List<String> serializedLogs;
private List<Object> originalLogsRequest;
public Batcher(LogsUploadOptions options, Iterable<Object> logs) {
this.serializer = getSerializer(options);
this.concurrency = getConcurrency(options);
this.serializedLogs = new ArrayList<>();
this.originalLogsRequest = new ArrayList<>();
this.iterator = logs.iterator();
}
/**
* Checks if there are more logs to batch. This method is not thread safe!
*
* When used concurrently, it should be synchronized along with {@link Batcher
*
* <pre>{@code
* synchronized (batcher) {
* if (batcher.hasNext()) {
* request = batcher.next();
* }
* }
* }</pre>
*/
@Override
public boolean hasNext() {
return iterator.hasNext() || currentBatchSize > 0;
}
/**
* Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe!
*
* Returns null when complete.
*/
@Override
public LogsIngestionRequest next() {
try {
return nextInternal();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1.
*/
public Stream<LogsIngestionRequest> toStream() {
if (concurrency == 1) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false);
}
return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true);
}
/**
* Creates flux with requests.
*/
public Flux<LogsIngestionRequest> toFlux() {
return Flux.create(emitter -> {
try {
while (hasNext()) {
LogsIngestionRequest next = nextInternal();
if (next != null) {
emitter.next(next);
}
}
} catch (IOException ex) {
emitter.error(ex);
}
emitter.complete();
});
}
private LogsIngestionRequest createRequest(boolean last) throws IOException {
try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream)) {
writer.writeStartArray();
for (String log : serializedLogs) {
writer.writeRawValue(log);
}
writer.writeEndArray();
writer.flush();
byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody);
} finally {
if (!last) {
originalLogsRequest = new ArrayList<>();
serializedLogs.clear();
}
}
}
private static ObjectSerializer getSerializer(LogsUploadOptions options) {
if (options != null && options.getObjectSerializer() != null) {
return options.getObjectSerializer();
}
return DEFAULT_SERIALIZER;
}
} |
Have we verified that the logs are written in the right format and are available to query in the logs workspace? | private LogsIngestionRequest createRequest(boolean last) throws IOException {
try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream)) {
writer.writeStartArray();
for (String log : serializedLogs) {
writer.writeRawValue(log);
}
writer.writeEndArray();
writer.flush();
byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody);
} finally {
if (!last) {
originalLogsRequest = new ArrayList<>();
serializedLogs.clear();
}
}
} | } | private LogsIngestionRequest createRequest(boolean last) throws IOException {
try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream)) {
writer.writeStartArray();
for (String log : serializedLogs) {
writer.writeRawValue(log);
}
writer.writeEndArray();
writer.flush();
byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody);
} finally {
if (!last) {
originalLogsRequest = new ArrayList<>();
serializedLogs.clear();
}
}
} | class Batcher implements Iterator<LogsIngestionRequest> {
private static final ClientLogger LOGGER = new ClientLogger(Batcher.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final ObjectSerializer serializer;
private final int concurrency;
private final Iterator<Object> iterator;
private long currentBatchSize;
private List<String> serializedLogs;
private List<Object> originalLogsRequest;
public Batcher(LogsUploadOptions options, Iterable<Object> logs) {
this.serializer = getSerializer(options);
this.concurrency = getConcurrency(options);
this.serializedLogs = new ArrayList<>();
this.originalLogsRequest = new ArrayList<>();
this.iterator = logs.iterator();
}
/**
* Checks if there are more logs to batch. This method is not thread safe!
*
* When used concurrently, it should be synchronized along with {@link Batcher
*
* <pre>{@code
* synchronized (batcher) {
* if (batcher.hasNext()) {
* request = batcher.next();
* }
* }
* }</pre>
*/
@Override
public boolean hasNext() {
return iterator.hasNext() || currentBatchSize > 0;
}
/**
* Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe!
*
* Returns null when complete.
*/
@Override
public LogsIngestionRequest next() {
try {
return nextInternal();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1.
*/
public Stream<LogsIngestionRequest> toStream() {
if (concurrency == 1) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false);
}
return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true);
}
/**
* Creates flux with requests.
*/
public Flux<LogsIngestionRequest> toFlux() {
return Flux.create(emitter -> {
try {
while (hasNext()) {
LogsIngestionRequest next = nextInternal();
if (next != null) {
emitter.next(next);
}
}
} catch (IOException ex) {
emitter.error(ex);
}
emitter.complete();
});
}
private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
byte[] bytes = serializer.serializeToBytes(currentLog);
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
}
private static ObjectSerializer getSerializer(LogsUploadOptions options) {
if (options != null && options.getObjectSerializer() != null) {
return options.getObjectSerializer();
}
return DEFAULT_SERIALIZER;
}
} | class Batcher implements Iterator<LogsIngestionRequest> {
private static final ClientLogger LOGGER = new ClientLogger(Batcher.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final ObjectSerializer serializer;
private final int concurrency;
private final Iterator<Object> iterator;
private long currentBatchSize;
private List<String> serializedLogs;
private List<Object> originalLogsRequest;
public Batcher(LogsUploadOptions options, Iterable<Object> logs) {
this.serializer = getSerializer(options);
this.concurrency = getConcurrency(options);
this.serializedLogs = new ArrayList<>();
this.originalLogsRequest = new ArrayList<>();
this.iterator = logs.iterator();
}
/**
* Checks if there are more logs to batch. This method is not thread safe!
*
* When used concurrently, it should be synchronized along with {@link Batcher
*
* <pre>{@code
* synchronized (batcher) {
* if (batcher.hasNext()) {
* request = batcher.next();
* }
* }
* }</pre>
*/
@Override
public boolean hasNext() {
return iterator.hasNext() || currentBatchSize > 0;
}
/**
* Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe!
*
* Returns null when complete.
*/
@Override
public LogsIngestionRequest next() {
try {
return nextInternal();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1.
*/
public Stream<LogsIngestionRequest> toStream() {
if (concurrency == 1) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false);
}
return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true);
}
/**
* Creates flux with requests.
*/
public Flux<LogsIngestionRequest> toFlux() {
return Flux.create(emitter -> {
try {
while (hasNext()) {
LogsIngestionRequest next = nextInternal();
if (next != null) {
emitter.next(next);
}
}
} catch (IOException ex) {
emitter.error(ex);
}
emitter.complete();
});
}
private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
byte[] bytes = serializer.serializeToBytes(currentLog);
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
}
private static ObjectSerializer getSerializer(LogsUploadOptions options) {
if (options != null && options.getObjectSerializer() != null) {
return options.getObjectSerializer();
}
return DEFAULT_SERIALIZER;
}
} |
@srnagar Yes, I've verified that the format of the logs within the portal are the same both before and after the changes in this PR. | private LogsIngestionRequest createRequest(boolean last) throws IOException {
try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream)) {
writer.writeStartArray();
for (String log : serializedLogs) {
writer.writeRawValue(log);
}
writer.writeEndArray();
writer.flush();
byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody);
} finally {
if (!last) {
originalLogsRequest = new ArrayList<>();
serializedLogs.clear();
}
}
} | } | private LogsIngestionRequest createRequest(boolean last) throws IOException {
try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
JsonWriter writer = JsonProviders.createWriter(byteArrayOutputStream)) {
writer.writeStartArray();
for (String log : serializedLogs) {
writer.writeRawValue(log);
}
writer.writeEndArray();
writer.flush();
byte[] zippedRequestBody = gzipRequest(byteArrayOutputStream.toByteArray());
return new LogsIngestionRequest(originalLogsRequest, zippedRequestBody);
} finally {
if (!last) {
originalLogsRequest = new ArrayList<>();
serializedLogs.clear();
}
}
} | class Batcher implements Iterator<LogsIngestionRequest> {
private static final ClientLogger LOGGER = new ClientLogger(Batcher.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final ObjectSerializer serializer;
private final int concurrency;
private final Iterator<Object> iterator;
private long currentBatchSize;
private List<String> serializedLogs;
private List<Object> originalLogsRequest;
public Batcher(LogsUploadOptions options, Iterable<Object> logs) {
this.serializer = getSerializer(options);
this.concurrency = getConcurrency(options);
this.serializedLogs = new ArrayList<>();
this.originalLogsRequest = new ArrayList<>();
this.iterator = logs.iterator();
}
/**
* Checks if there are more logs to batch. This method is not thread safe!
*
* When used concurrently, it should be synchronized along with {@link Batcher
*
* <pre>{@code
* synchronized (batcher) {
* if (batcher.hasNext()) {
* request = batcher.next();
* }
* }
* }</pre>
*/
@Override
public boolean hasNext() {
return iterator.hasNext() || currentBatchSize > 0;
}
/**
* Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe!
*
* Returns null when complete.
*/
@Override
public LogsIngestionRequest next() {
try {
return nextInternal();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1.
*/
public Stream<LogsIngestionRequest> toStream() {
if (concurrency == 1) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false);
}
return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true);
}
/**
* Creates flux with requests.
*/
public Flux<LogsIngestionRequest> toFlux() {
return Flux.create(emitter -> {
try {
while (hasNext()) {
LogsIngestionRequest next = nextInternal();
if (next != null) {
emitter.next(next);
}
}
} catch (IOException ex) {
emitter.error(ex);
}
emitter.complete();
});
}
private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
byte[] bytes = serializer.serializeToBytes(currentLog);
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
}
private static ObjectSerializer getSerializer(LogsUploadOptions options) {
if (options != null && options.getObjectSerializer() != null) {
return options.getObjectSerializer();
}
return DEFAULT_SERIALIZER;
}
} | class Batcher implements Iterator<LogsIngestionRequest> {
private static final ClientLogger LOGGER = new ClientLogger(Batcher.class);
private static final JsonSerializer DEFAULT_SERIALIZER = JsonSerializerProviders.createInstance(true);
private final ObjectSerializer serializer;
private final int concurrency;
private final Iterator<Object> iterator;
private long currentBatchSize;
private List<String> serializedLogs;
private List<Object> originalLogsRequest;
public Batcher(LogsUploadOptions options, Iterable<Object> logs) {
this.serializer = getSerializer(options);
this.concurrency = getConcurrency(options);
this.serializedLogs = new ArrayList<>();
this.originalLogsRequest = new ArrayList<>();
this.iterator = logs.iterator();
}
/**
* Checks if there are more logs to batch. This method is not thread safe!
*
* When used concurrently, it should be synchronized along with {@link Batcher
*
* <pre>{@code
* synchronized (batcher) {
* if (batcher.hasNext()) {
* request = batcher.next();
* }
* }
* }</pre>
*/
@Override
public boolean hasNext() {
return iterator.hasNext() || currentBatchSize > 0;
}
/**
* Collects next batch and serializes it into {@link LogsIngestionRequest}. This method is not thread-safe!
*
* Returns null when complete.
*/
@Override
public LogsIngestionRequest next() {
try {
return nextInternal();
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/**
* Creates stream of requests split for configured concurrency. Returns parallel stream if concurrency is bigger than 1.
*/
public Stream<LogsIngestionRequest> toStream() {
if (concurrency == 1) {
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(this, Spliterator.NONNULL | Spliterator.ORDERED), false);
}
return StreamSupport.stream(new ConcurrencyLimitingSpliterator<>(this, concurrency), true);
}
/**
* Creates flux with requests.
*/
public Flux<LogsIngestionRequest> toFlux() {
return Flux.create(emitter -> {
try {
while (hasNext()) {
LogsIngestionRequest next = nextInternal();
if (next != null) {
emitter.next(next);
}
}
} catch (IOException ex) {
emitter.error(ex);
}
emitter.complete();
});
}
private LogsIngestionRequest nextInternal() throws IOException {
LogsIngestionRequest result = null;
while (iterator.hasNext() && result == null) {
Object currentLog = iterator.next();
byte[] bytes = serializer.serializeToBytes(currentLog);
currentBatchSize += bytes.length;
if (currentBatchSize > MAX_REQUEST_PAYLOAD_SIZE) {
result = createRequest(false);
currentBatchSize = bytes.length;
}
serializedLogs.add(new String(bytes, StandardCharsets.UTF_8));
originalLogsRequest.add(currentLog);
}
if (result == null && currentBatchSize > 0) {
currentBatchSize = 0;
return createRequest(true);
}
return result;
}
private static ObjectSerializer getSerializer(LogsUploadOptions options) {
if (options != null && options.getObjectSerializer() != null) {
return options.getObjectSerializer();
}
return DEFAULT_SERIALIZER;
}
} |
@srnagar should we change code generation to use `HttpHeaders` in this case? If that isn't possible, I think this will need to remain as `Map<String, ?>` | public void setHeaders(Object[] swaggerMethodArguments, HttpHeaders headers, ObjectSerializer serializer) {
headers.setAllHeaders(requestHeaders);
if (swaggerMethodArguments == null) {
return;
}
for (HeaderSubstitution headerSubstitution : headerSubstitutions) {
final int parameterIndex = headerSubstitution.getMethodParameterIndex();
if (0 <= parameterIndex && parameterIndex < swaggerMethodArguments.length) {
final Object methodArgument = swaggerMethodArguments[headerSubstitution.getMethodParameterIndex()];
if (methodArgument instanceof Map) {
@SuppressWarnings("unchecked") final Map<HttpHeaderName, ?> headerCollection =
(Map<HttpHeaderName, ?>) methodArgument;
final String headerCollectionPrefix = headerSubstitution.getUrlParameterName();
for (final Map.Entry<HttpHeaderName, ?> headerCollectionEntry : headerCollection.entrySet()) {
final String headerName = headerCollectionPrefix + headerCollectionEntry.getKey();
final String headerValue = serialize(serializer, headerCollectionEntry.getValue());
if (headerValue != null) {
headers.set(HttpHeaderName.fromString(headerName), headerValue);
}
}
} else {
final String headerValue = serialize(serializer, methodArgument);
if (headerValue != null) {
headers.set(headerSubstitution.getHeaderName(), headerValue);
}
}
}
}
} | @SuppressWarnings("unchecked") final Map<HttpHeaderName, ?> headerCollection = | public void setHeaders(Object[] swaggerMethodArguments, HttpHeaders headers, ObjectSerializer serializer) {
headers.setAll(requestHeaders);
if (swaggerMethodArguments == null) {
return;
}
for (HeaderSubstitution headerSubstitution : headerSubstitutions) {
final int parameterIndex = headerSubstitution.getMethodParameterIndex();
if (0 <= parameterIndex && parameterIndex < swaggerMethodArguments.length) {
final Object methodArgument = swaggerMethodArguments[headerSubstitution.getMethodParameterIndex()];
if (methodArgument instanceof Map) {
@SuppressWarnings("unchecked") final Map<HttpHeaderName, ?> headerCollection =
(Map<HttpHeaderName, ?>) methodArgument;
final String headerCollectionPrefix = headerSubstitution.getUrlParameterName();
for (final Map.Entry<HttpHeaderName, ?> headerCollectionEntry : headerCollection.entrySet()) {
final String headerName = headerCollectionPrefix + headerCollectionEntry.getKey();
final String headerValue = serialize(serializer, headerCollectionEntry.getValue());
if (headerValue != null) {
headers.set(HttpHeaderName.fromString(headerName), headerValue);
}
}
} else {
final String headerValue = serialize(serializer, methodArgument);
if (headerValue != null) {
headers.set(headerSubstitution.getHeaderName(), headerValue);
}
}
}
}
} | class && requestOptionsPosition == -1) {
requestOptionsPosition = i;
} | class && requestOptionsPosition == -1) {
requestOptionsPosition = i;
} |
Let's keep this `Map<String, ?>` for now as this would require changes in multiple layers to switch to HttpHeaders in codegen. | public void setHeaders(Object[] swaggerMethodArguments, HttpHeaders headers, ObjectSerializer serializer) {
headers.setAllHeaders(requestHeaders);
if (swaggerMethodArguments == null) {
return;
}
for (HeaderSubstitution headerSubstitution : headerSubstitutions) {
final int parameterIndex = headerSubstitution.getMethodParameterIndex();
if (0 <= parameterIndex && parameterIndex < swaggerMethodArguments.length) {
final Object methodArgument = swaggerMethodArguments[headerSubstitution.getMethodParameterIndex()];
if (methodArgument instanceof Map) {
@SuppressWarnings("unchecked") final Map<HttpHeaderName, ?> headerCollection =
(Map<HttpHeaderName, ?>) methodArgument;
final String headerCollectionPrefix = headerSubstitution.getUrlParameterName();
for (final Map.Entry<HttpHeaderName, ?> headerCollectionEntry : headerCollection.entrySet()) {
final String headerName = headerCollectionPrefix + headerCollectionEntry.getKey();
final String headerValue = serialize(serializer, headerCollectionEntry.getValue());
if (headerValue != null) {
headers.set(HttpHeaderName.fromString(headerName), headerValue);
}
}
} else {
final String headerValue = serialize(serializer, methodArgument);
if (headerValue != null) {
headers.set(headerSubstitution.getHeaderName(), headerValue);
}
}
}
}
} | @SuppressWarnings("unchecked") final Map<HttpHeaderName, ?> headerCollection = | public void setHeaders(Object[] swaggerMethodArguments, HttpHeaders headers, ObjectSerializer serializer) {
headers.setAll(requestHeaders);
if (swaggerMethodArguments == null) {
return;
}
for (HeaderSubstitution headerSubstitution : headerSubstitutions) {
final int parameterIndex = headerSubstitution.getMethodParameterIndex();
if (0 <= parameterIndex && parameterIndex < swaggerMethodArguments.length) {
final Object methodArgument = swaggerMethodArguments[headerSubstitution.getMethodParameterIndex()];
if (methodArgument instanceof Map) {
@SuppressWarnings("unchecked") final Map<HttpHeaderName, ?> headerCollection =
(Map<HttpHeaderName, ?>) methodArgument;
final String headerCollectionPrefix = headerSubstitution.getUrlParameterName();
for (final Map.Entry<HttpHeaderName, ?> headerCollectionEntry : headerCollection.entrySet()) {
final String headerName = headerCollectionPrefix + headerCollectionEntry.getKey();
final String headerValue = serialize(serializer, headerCollectionEntry.getValue());
if (headerValue != null) {
headers.set(HttpHeaderName.fromString(headerName), headerValue);
}
}
} else {
final String headerValue = serialize(serializer, methodArgument);
if (headerValue != null) {
headers.set(headerSubstitution.getHeaderName(), headerValue);
}
}
}
}
} | class && requestOptionsPosition == -1) {
requestOptionsPosition = i;
} | class && requestOptionsPosition == -1) {
requestOptionsPosition = i;
} |
Remove commented out snippet? | public void testRedactRequestBodyRegex() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.POST, "http:
request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/x-www-form-urlencoded");
request.setBody("first_value=value&client_secret=aVerySecretSecret&other=value&is=cool");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals(record.getRequestBody(), "first_value=value&client_secret=REDACTED&other=value&is=cool");
} | public void testRedactRequestBodyRegex() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
interceptorManager.addMatchers(new CustomMatcher().setHeadersKeyOnlyMatch(Collections.singletonList("Accept")));
HttpRequest request = new HttpRequest(HttpMethod.POST, "http:
request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/x-www-form-urlencoded");
request.setBody("first_value=value&client_secret=aVerySecretSecret&other=value&is=cool");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals(record.getRequestBody(), "first_value=value&client_secret=REDACTED&other=value&is=cool");
} | class TestProxyTests extends TestProxyTestBase {
public static final String TEST_DATA = "{\"test\":\"proxy\"}";
static TestProxyTestServer server;
private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>();
public static final String REDACTED = "REDACTED";
private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY
= HttpHeaderName.fromString("Ocp-Apim-Subscription-Key");
static {
CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY));
CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED,
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename"));
}
@BeforeAll
public static void setupClass() {
server = new TestProxyTestServer();
}
@AfterAll
public static void teardownClass() {
server.close();
}
@Test
@Tag("Record")
public void testBasicRecord() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testOrdering() {
String name = testResourceNamer.randomName("test", 10);
assertEquals("test32950", name);
}
@Test
@Tag("Record")
@DoNotRecord
public void testDoNotRecord() {
testResourceNamer.now();
}
@Test
@Tag("Playback")
@DoNotRecord
public void testDoNotPlayback() {
testResourceNamer.now();
}
@Test
@Tag("Playback")
public void testMismatch() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE));
assertTrue(thrown.getMessage().contains("Uri doesn't match"));
}
@Test
@Tag("Record")
@RecordWithoutRequestBody
public void testRecordWithPath() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request
= new HttpRequest(HttpMethod.POST, "http:
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length()));
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Record")
public void testRecordWithHeaders() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(HttpHeaderName.fromString("header1"), "value1")
.setHeader(HttpHeaderName.fromString("header2"), "value2");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testPlayback() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals("first path", response.getBodyAsBinaryData().toString());
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Live")
public void testCannotGetPlaybackClient() {
RuntimeException thrown
= assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient());
assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage());
}
@Test
@Tag("Live")
public void testCannotGetRecordPolicy() {
RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy());
assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage());
}
@Test
@Tag("Playback")
public void testRecordWithRedaction() {
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
HttpClient client = interceptorManager.getPlaybackClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY")
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(response.getStatusCode(), 200);
assertEquals(200, response.getStatusCode());
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals("http:
assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key"));
assertTrue(record.getResponseHeaders()
.get("Operation-Location")
.startsWith("https:
assertEquals(REDACTED, record.getResponse().get("modelId"));
assertEquals(REDACTED, record.getResponse().get("client_secret"));
}
}
@Test
@Tag("Playback")
public void testPlaybackWithRedaction() {
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
interceptorManager.addMatchers(Collections.singletonList(
new CustomMatcher().setExcludedHeaders(Collections.singletonList("Ocp-Apim-Subscription-Key"))));
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY")
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testBodyRegexRedactRecord() {
HttpClient client = interceptorManager.getPlaybackClient();
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
interceptorManager.addMatchers(new CustomMatcher().setHeadersKeyOnlyMatch(Collections.singletonList("Accept")));
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals("http:
assertTrue(record.getResponse()
.get("Body")
.contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>"));
assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>"));
assertEquals(record.getResponse().get("TableName"), REDACTED);
}
@Test
@Tag("Playback")
@Test
@Tag("Live")
public void canGetTestProxyVersion() {
String version = TestProxyUtils.getTestProxyVersion(this.getTestClassPath());
assertNotNull(version);
}
@Test
@Tag("Record")
public void testResetTestProxyData() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
final HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
try (HttpResponse response
= pipeline.sendSync(new HttpRequest(HttpMethod.GET, "http:
assertEquals(200, response.getStatusCode());
HttpHeaders headers = response.getRequest().getHeaders();
assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-id")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip")));
}
}
@Test
@Tag("Record")
public void testRecordWithRedirect() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client)
.policies(new RedirectPolicy(), interceptorManager.getRecordPolicy())
.build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
assertEquals("http:
response.getRequest().getUrl().toString());
assertNull(
response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri")));
}
}
private RecordedTestProxyData readDataFromFile() {
try {
BufferedReader reader = Files.newBufferedReader(Paths.get(interceptorManager.getRecordingFileLocation()));
return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
@JsonIgnoreProperties(ignoreUnknown = true)
static class RecordedTestProxyData {
@JsonProperty("Entries")
private final LinkedList<TestProxyDataRecord> testProxyDataRecords;
RecordedTestProxyData() {
testProxyDataRecords = new LinkedList<>();
}
public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() {
return testProxyDataRecords;
}
@JsonIgnoreProperties(ignoreUnknown = true)
static class TestProxyDataRecord {
@JsonProperty("RequestMethod")
private String method;
@JsonProperty("RequestUri")
private String uri;
@JsonProperty("RequestHeaders")
private Map<String, String> headers;
@JsonProperty("ResponseBody")
private Map<String, String> response;
@JsonProperty("ResponseHeaders")
private Map<String, String> responseHeaders;
@JsonProperty("RequestBody")
private String requestBody;
public String getMethod() {
return method;
}
public String getUri() {
return uri;
}
public Map<String, String> getHeaders() {
return headers;
}
public Map<String, String> getResponse() {
return response;
}
public Map<String, String> getResponseHeaders() {
return responseHeaders;
}
public String getRequestBody() {
return requestBody;
}
}
}
} | class TestProxyTests extends TestProxyTestBase {
public static final String TEST_DATA = "{\"test\":\"proxy\"}";
static TestProxyTestServer server;
private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>();
public static final String REDACTED = "REDACTED";
private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY
= HttpHeaderName.fromString("Ocp-Apim-Subscription-Key");
static {
CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY));
CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED,
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename"));
}
@BeforeAll
public static void setupClass() {
server = new TestProxyTestServer();
}
@AfterAll
public static void teardownClass() {
server.close();
}
@Test
@Tag("Record")
public void testBasicRecord() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testOrdering() {
String name = testResourceNamer.randomName("test", 10);
assertEquals("test32950", name);
}
@Test
@Tag("Record")
@DoNotRecord
public void testDoNotRecord() {
testResourceNamer.now();
}
@Test
@Tag("Playback")
@DoNotRecord
public void testDoNotPlayback() {
testResourceNamer.now();
}
@Test
@Tag("Playback")
public void testMismatch() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE));
assertTrue(thrown.getMessage().contains("Uri doesn't match"));
}
@Test
@Tag("Record")
@RecordWithoutRequestBody
public void testRecordWithPath() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request
= new HttpRequest(HttpMethod.POST, "http:
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length()));
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Record")
public void testRecordWithHeaders() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(HttpHeaderName.fromString("header1"), "value1")
.setHeader(HttpHeaderName.fromString("header2"), "value2");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testPlayback() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals("first path", response.getBodyAsBinaryData().toString());
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Live")
public void testCannotGetPlaybackClient() {
RuntimeException thrown
= assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient());
assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage());
}
@Test
@Tag("Live")
public void testCannotGetRecordPolicy() {
RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy());
assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage());
}
@Test
@Tag("Playback")
public void testRecordWithRedaction() {
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
HttpClient client = interceptorManager.getPlaybackClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY")
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(response.getStatusCode(), 200);
assertEquals(200, response.getStatusCode());
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals("http:
assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key"));
assertTrue(record.getResponseHeaders()
.get("Operation-Location")
.startsWith("https:
assertEquals(REDACTED, record.getResponse().get("modelId"));
assertEquals(REDACTED, record.getResponse().get("client_secret"));
}
}
@Test
@Tag("Playback")
public void testPlaybackWithRedaction() {
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
interceptorManager.addMatchers(Collections.singletonList(
new CustomMatcher().setExcludedHeaders(Collections.singletonList("Ocp-Apim-Subscription-Key"))));
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY")
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testBodyRegexRedactRecord() {
HttpClient client = interceptorManager.getPlaybackClient();
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
interceptorManager.addMatchers(new CustomMatcher().setHeadersKeyOnlyMatch(Collections.singletonList("Accept")));
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals("http:
assertTrue(record.getResponse()
.get("Body")
.contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>"));
assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>"));
assertEquals(record.getResponse().get("TableName"), REDACTED);
}
@Test
@Tag("Playback")
@Test
@Tag("Live")
public void canGetTestProxyVersion() {
String version = TestProxyUtils.getTestProxyVersion(this.getTestClassPath());
assertNotNull(version);
}
@Test
@Tag("Record")
public void testResetTestProxyData() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
final HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
try (HttpResponse response
= pipeline.sendSync(new HttpRequest(HttpMethod.GET, "http:
assertEquals(200, response.getStatusCode());
HttpHeaders headers = response.getRequest().getHeaders();
assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-id")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip")));
}
}
@Test
@Tag("Record")
public void testRecordWithRedirect() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client)
.policies(new RedirectPolicy(), interceptorManager.getRecordPolicy())
.build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
assertEquals("http:
response.getRequest().getUrl().toString());
assertNull(
response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri")));
}
}
private RecordedTestProxyData readDataFromFile() {
try {
BufferedReader reader = Files.newBufferedReader(Paths.get(interceptorManager.getRecordingFileLocation()));
return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
@JsonIgnoreProperties(ignoreUnknown = true)
static class RecordedTestProxyData {
@JsonProperty("Entries")
private final LinkedList<TestProxyDataRecord> testProxyDataRecords;
RecordedTestProxyData() {
testProxyDataRecords = new LinkedList<>();
}
public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() {
return testProxyDataRecords;
}
@JsonIgnoreProperties(ignoreUnknown = true)
static class TestProxyDataRecord {
@JsonProperty("RequestMethod")
private String method;
@JsonProperty("RequestUri")
private String uri;
@JsonProperty("RequestHeaders")
private Map<String, String> headers;
@JsonProperty("ResponseBody")
private Map<String, String> response;
@JsonProperty("ResponseHeaders")
private Map<String, String> responseHeaders;
@JsonProperty("RequestBody")
private String requestBody;
public String getMethod() {
return method;
}
public String getUri() {
return uri;
}
public Map<String, String> getHeaders() {
return headers;
}
public Map<String, String> getResponse() {
return response;
}
public Map<String, String> getResponseHeaders() {
return responseHeaders;
}
public String getRequestBody() {
return requestBody;
}
}
}
} | |
I left it because for our infra tests here, when we change something for a playback we re-record it with the same method. So this is the "record" path that isn't persisted but handy to have around. | public void testRedactRequestBodyRegex() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.POST, "http:
request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/x-www-form-urlencoded");
request.setBody("first_value=value&client_secret=aVerySecretSecret&other=value&is=cool");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals(record.getRequestBody(), "first_value=value&client_secret=REDACTED&other=value&is=cool");
} | public void testRedactRequestBodyRegex() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
interceptorManager.addMatchers(new CustomMatcher().setHeadersKeyOnlyMatch(Collections.singletonList("Accept")));
HttpRequest request = new HttpRequest(HttpMethod.POST, "http:
request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/x-www-form-urlencoded");
request.setBody("first_value=value&client_secret=aVerySecretSecret&other=value&is=cool");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals(record.getRequestBody(), "first_value=value&client_secret=REDACTED&other=value&is=cool");
} | class TestProxyTests extends TestProxyTestBase {
public static final String TEST_DATA = "{\"test\":\"proxy\"}";
static TestProxyTestServer server;
private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>();
public static final String REDACTED = "REDACTED";
private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY
= HttpHeaderName.fromString("Ocp-Apim-Subscription-Key");
static {
CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY));
CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED,
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename"));
}
@BeforeAll
public static void setupClass() {
server = new TestProxyTestServer();
}
@AfterAll
public static void teardownClass() {
server.close();
}
@Test
@Tag("Record")
public void testBasicRecord() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testOrdering() {
String name = testResourceNamer.randomName("test", 10);
assertEquals("test32950", name);
}
@Test
@Tag("Record")
@DoNotRecord
public void testDoNotRecord() {
testResourceNamer.now();
}
@Test
@Tag("Playback")
@DoNotRecord
public void testDoNotPlayback() {
testResourceNamer.now();
}
@Test
@Tag("Playback")
public void testMismatch() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE));
assertTrue(thrown.getMessage().contains("Uri doesn't match"));
}
@Test
@Tag("Record")
@RecordWithoutRequestBody
public void testRecordWithPath() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request
= new HttpRequest(HttpMethod.POST, "http:
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length()));
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Record")
public void testRecordWithHeaders() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(HttpHeaderName.fromString("header1"), "value1")
.setHeader(HttpHeaderName.fromString("header2"), "value2");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testPlayback() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals("first path", response.getBodyAsBinaryData().toString());
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Live")
public void testCannotGetPlaybackClient() {
RuntimeException thrown
= assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient());
assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage());
}
@Test
@Tag("Live")
public void testCannotGetRecordPolicy() {
RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy());
assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage());
}
@Test
@Tag("Playback")
public void testRecordWithRedaction() {
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
HttpClient client = interceptorManager.getPlaybackClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY")
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(response.getStatusCode(), 200);
assertEquals(200, response.getStatusCode());
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals("http:
assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key"));
assertTrue(record.getResponseHeaders()
.get("Operation-Location")
.startsWith("https:
assertEquals(REDACTED, record.getResponse().get("modelId"));
assertEquals(REDACTED, record.getResponse().get("client_secret"));
}
}
@Test
@Tag("Playback")
public void testPlaybackWithRedaction() {
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
interceptorManager.addMatchers(Collections.singletonList(
new CustomMatcher().setExcludedHeaders(Collections.singletonList("Ocp-Apim-Subscription-Key"))));
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY")
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testBodyRegexRedactRecord() {
HttpClient client = interceptorManager.getPlaybackClient();
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
interceptorManager.addMatchers(new CustomMatcher().setHeadersKeyOnlyMatch(Collections.singletonList("Accept")));
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals("http:
assertTrue(record.getResponse()
.get("Body")
.contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>"));
assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>"));
assertEquals(record.getResponse().get("TableName"), REDACTED);
}
@Test
@Tag("Playback")
@Test
@Tag("Live")
public void canGetTestProxyVersion() {
String version = TestProxyUtils.getTestProxyVersion(this.getTestClassPath());
assertNotNull(version);
}
@Test
@Tag("Record")
public void testResetTestProxyData() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
final HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
try (HttpResponse response
= pipeline.sendSync(new HttpRequest(HttpMethod.GET, "http:
assertEquals(200, response.getStatusCode());
HttpHeaders headers = response.getRequest().getHeaders();
assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-id")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip")));
}
}
@Test
@Tag("Record")
public void testRecordWithRedirect() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client)
.policies(new RedirectPolicy(), interceptorManager.getRecordPolicy())
.build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
assertEquals("http:
response.getRequest().getUrl().toString());
assertNull(
response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri")));
}
}
private RecordedTestProxyData readDataFromFile() {
try {
BufferedReader reader = Files.newBufferedReader(Paths.get(interceptorManager.getRecordingFileLocation()));
return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
@JsonIgnoreProperties(ignoreUnknown = true)
static class RecordedTestProxyData {
@JsonProperty("Entries")
private final LinkedList<TestProxyDataRecord> testProxyDataRecords;
RecordedTestProxyData() {
testProxyDataRecords = new LinkedList<>();
}
public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() {
return testProxyDataRecords;
}
@JsonIgnoreProperties(ignoreUnknown = true)
static class TestProxyDataRecord {
@JsonProperty("RequestMethod")
private String method;
@JsonProperty("RequestUri")
private String uri;
@JsonProperty("RequestHeaders")
private Map<String, String> headers;
@JsonProperty("ResponseBody")
private Map<String, String> response;
@JsonProperty("ResponseHeaders")
private Map<String, String> responseHeaders;
@JsonProperty("RequestBody")
private String requestBody;
public String getMethod() {
return method;
}
public String getUri() {
return uri;
}
public Map<String, String> getHeaders() {
return headers;
}
public Map<String, String> getResponse() {
return response;
}
public Map<String, String> getResponseHeaders() {
return responseHeaders;
}
public String getRequestBody() {
return requestBody;
}
}
}
} | class TestProxyTests extends TestProxyTestBase {
public static final String TEST_DATA = "{\"test\":\"proxy\"}";
static TestProxyTestServer server;
private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT);
private static final List<TestProxySanitizer> CUSTOM_SANITIZER = new ArrayList<>();
public static final String REDACTED = "REDACTED";
private static final HttpHeaderName OCP_APIM_SUBSCRIPTION_KEY
= HttpHeaderName.fromString("Ocp-Apim-Subscription-Key");
static {
CUSTOM_SANITIZER.add(new TestProxySanitizer("$..modelId", null, REDACTED, TestProxySanitizerType.BODY_KEY));
CUSTOM_SANITIZER.add(new TestProxySanitizer("TableName\\\"*:*\\\"(?<tablename>.*)\\\"", REDACTED,
TestProxySanitizerType.BODY_REGEX).setGroupForReplace("tablename"));
}
@BeforeAll
public static void setupClass() {
server = new TestProxyTestServer();
}
@AfterAll
public static void teardownClass() {
server.close();
}
@Test
@Tag("Record")
public void testBasicRecord() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testOrdering() {
String name = testResourceNamer.randomName("test", 10);
assertEquals("test32950", name);
}
@Test
@Tag("Record")
@DoNotRecord
public void testDoNotRecord() {
testResourceNamer.now();
}
@Test
@Tag("Playback")
@DoNotRecord
public void testDoNotPlayback() {
testResourceNamer.now();
}
@Test
@Tag("Playback")
public void testMismatch() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
RuntimeException thrown = assertThrows(RuntimeException.class, () -> client.sendSync(request, Context.NONE));
assertTrue(thrown.getMessage().contains("Uri doesn't match"));
}
@Test
@Tag("Record")
@RecordWithoutRequestBody
public void testRecordWithPath() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request
= new HttpRequest(HttpMethod.POST, "http:
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.CONTENT_LENGTH, String.valueOf(TEST_DATA.length()));
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Record")
public void testRecordWithHeaders() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
testResourceNamer.randomName("test", 10);
testResourceNamer.now();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(HttpHeaderName.fromString("header1"), "value1")
.setHeader(HttpHeaderName.fromString("header2"), "value2");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testPlayback() {
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals("first path", response.getBodyAsBinaryData().toString());
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Live")
public void testCannotGetPlaybackClient() {
RuntimeException thrown
= assertThrows(IllegalStateException.class, () -> interceptorManager.getPlaybackClient());
assertEquals("A playback client can only be requested in PLAYBACK mode.", thrown.getMessage());
}
@Test
@Tag("Live")
public void testCannotGetRecordPolicy() {
RuntimeException thrown = assertThrows(IllegalStateException.class, () -> interceptorManager.getRecordPolicy());
assertEquals("A recording policy can only be requested in RECORD mode.", thrown.getMessage());
}
@Test
@Tag("Playback")
public void testRecordWithRedaction() {
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
HttpClient client = interceptorManager.getPlaybackClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY")
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(response.getStatusCode(), 200);
assertEquals(200, response.getStatusCode());
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals("http:
assertEquals(REDACTED, record.getHeaders().get("Ocp-Apim-Subscription-Key"));
assertTrue(record.getResponseHeaders()
.get("Operation-Location")
.startsWith("https:
assertEquals(REDACTED, record.getResponse().get("modelId"));
assertEquals(REDACTED, record.getResponse().get("client_secret"));
}
}
@Test
@Tag("Playback")
public void testPlaybackWithRedaction() {
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
interceptorManager.addMatchers(Collections.singletonList(
new CustomMatcher().setExcludedHeaders(Collections.singletonList("Ocp-Apim-Subscription-Key"))));
HttpClient client = interceptorManager.getPlaybackClient();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
.setHeader(OCP_APIM_SUBSCRIPTION_KEY, "SECRET_API_KEY")
.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json")
.setHeader(HttpHeaderName.ACCEPT, "*/*");
try (HttpResponse response = client.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
}
@Test
@Tag("Playback")
public void testBodyRegexRedactRecord() {
HttpClient client = interceptorManager.getPlaybackClient();
interceptorManager.addSanitizers(CUSTOM_SANITIZER);
interceptorManager.addMatchers(new CustomMatcher().setHeadersKeyOnlyMatch(Collections.singletonList("Accept")));
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client).build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
request.setHeader(HttpHeaderName.CONTENT_TYPE, "application/json");
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
}
RecordedTestProxyData recordedTestProxyData = readDataFromFile();
RecordedTestProxyData.TestProxyDataRecord record = recordedTestProxyData.getTestProxyDataRecords().get(0);
assertEquals("http:
assertTrue(record.getResponse()
.get("Body")
.contains("<UserDelegationKey><SignedTid>REDACTED</SignedTid></UserDelegationKey>"));
assertTrue(record.getResponse().get("primaryKey").contains("<PrimaryKey>REDACTED</PrimaryKey>"));
assertEquals(record.getResponse().get("TableName"), REDACTED);
}
@Test
@Tag("Playback")
@Test
@Tag("Live")
public void canGetTestProxyVersion() {
String version = TestProxyUtils.getTestProxyVersion(this.getTestClassPath());
assertNotNull(version);
}
@Test
@Tag("Record")
public void testResetTestProxyData() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
final HttpPipeline pipeline
= new HttpPipelineBuilder().httpClient(client).policies(interceptorManager.getRecordPolicy()).build();
try (HttpResponse response
= pipeline.sendSync(new HttpRequest(HttpMethod.GET, "http:
assertEquals(200, response.getStatusCode());
HttpHeaders headers = response.getRequest().getHeaders();
assertNull(headers.get(HttpHeaderName.fromString("x-recording-upstream-base-uri")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-mode")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-id")));
assertNull(headers.get(HttpHeaderName.fromString("x-recording-skip")));
}
}
@Test
@Tag("Record")
public void testRecordWithRedirect() {
HttpURLConnectionHttpClient client = new HttpURLConnectionHttpClient();
HttpPipeline pipeline = new HttpPipelineBuilder().httpClient(client)
.policies(new RedirectPolicy(), interceptorManager.getRecordPolicy())
.build();
HttpRequest request = new HttpRequest(HttpMethod.GET, "http:
try (HttpResponse response = pipeline.sendSync(request, Context.NONE)) {
assertEquals(200, response.getStatusCode());
assertEquals("http:
response.getRequest().getUrl().toString());
assertNull(
response.getRequest().getHeaders().get(HttpHeaderName.fromString("x-recording-upstream-base-uri")));
}
}
private RecordedTestProxyData readDataFromFile() {
try {
BufferedReader reader = Files.newBufferedReader(Paths.get(interceptorManager.getRecordingFileLocation()));
return RECORD_MAPPER.readValue(reader, RecordedTestProxyData.class);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
@JsonIgnoreProperties(ignoreUnknown = true)
static class RecordedTestProxyData {
@JsonProperty("Entries")
private final LinkedList<TestProxyDataRecord> testProxyDataRecords;
RecordedTestProxyData() {
testProxyDataRecords = new LinkedList<>();
}
public LinkedList<TestProxyDataRecord> getTestProxyDataRecords() {
return testProxyDataRecords;
}
@JsonIgnoreProperties(ignoreUnknown = true)
static class TestProxyDataRecord {
@JsonProperty("RequestMethod")
private String method;
@JsonProperty("RequestUri")
private String uri;
@JsonProperty("RequestHeaders")
private Map<String, String> headers;
@JsonProperty("ResponseBody")
private Map<String, String> response;
@JsonProperty("ResponseHeaders")
private Map<String, String> responseHeaders;
@JsonProperty("RequestBody")
private String requestBody;
public String getMethod() {
return method;
}
public String getUri() {
return uri;
}
public Map<String, String> getHeaders() {
return headers;
}
public Map<String, String> getResponse() {
return response;
}
public Map<String, String> getResponseHeaders() {
return responseHeaders;
}
public String getRequestBody() {
return requestBody;
}
}
}
} | |
it allows to query all messages with something like message == "terminating work" and reason == "sink-error" instead of doing unreliable string parsing | private void terminate(WorkTerminalState terminalState) {
if (isTerminated.getAndSet(true)) {
return;
}
try {
timers.dispose();
} finally {
if (terminalState == WorkTerminalState.SINK_ERROR) {
withPendingKey(logger.atWarning())
.addKeyValue("reason", "sink-error")
.log(TERMINATING_WORK);
return;
}
if (terminalState == WorkTerminalState.CANCELED) {
assertCondition(isCanceled(), terminalState);
withPendingKey(logger.atWarning())
.addKeyValue("reason", "sink-canceled")
.log(TERMINATING_WORK);
return;
}
if (terminalState == WorkTerminalState.RECEIVED_DEMANDED) {
assertCondition(hasReceivedDemanded(), terminalState);
withPendingKey(logger.atVerbose()).log(TERMINATING_WORK);
closeWindow();
return;
}
if (terminalState == WorkTerminalState.CONSUMER_ERROR) {
assertCondition(hasConsumerError(), terminalState);
final Throwable e = consumerError.get();
withPendingKey(logger.atWarning()).log(e.getMessage(), e);
closeWindow(e);
return;
}
if (terminalState == WorkTerminalState.TIMED_OUT) {
assertCondition(hasTimedOut(), terminalState);
final TimeoutReason reason = timeoutReason.get();
final Throwable e = reason.getError();
if (e != null) {
withPendingKey(logger.atWarning())
.addKeyValue("reason", reason.getMessage())
.log(TERMINATING_WORK, e);
closeWindow(e);
} else {
withPendingKey(logger.atVerbose())
.addKeyValue("reason", reason.getMessage())
.log(TERMINATING_WORK);
closeWindow();
}
return;
}
if (terminalState == WorkTerminalState.PARENT_TERMINAL) {
assertCondition(parent.isDoneOrCanceled(), terminalState);
final Throwable e = parent.getTerminalError();
withPendingKey(logger.atWarning()).log(e.getMessage(), e);
closeWindow(e);
return;
}
if (terminalState == WorkTerminalState.PARENT_TERMINAL_CLEAN_CLOSE) {
assertCondition(parent.isDoneOrCanceled() && isStreaming(), terminalState);
withPendingKey(logger.atWarning())
.addKeyValue("reason", "terminal-clean-close")
.log(TERMINATING_WORK);
closeWindow();
return;
}
}
throw logger.atError().log(new IllegalStateException("Unknown work terminal state." + terminalState));
} | .log(TERMINATING_WORK); | private void terminate(WorkTerminalState terminalState) {
if (isTerminated.getAndSet(true)) {
return;
}
try {
timers.dispose();
} finally {
if (terminalState == WorkTerminalState.SINK_ERROR) {
withPendingKey(logger.atWarning())
.addKeyValue("reason", "sink-error")
.log(TERMINATING_WORK);
return;
}
if (terminalState == WorkTerminalState.CANCELED) {
assertCondition(isCanceled(), terminalState);
withPendingKey(logger.atWarning())
.addKeyValue("reason", "sink-canceled")
.log(TERMINATING_WORK);
return;
}
if (terminalState == WorkTerminalState.RECEIVED_DEMANDED) {
assertCondition(hasReceivedDemanded(), terminalState);
withPendingKey(logger.atVerbose()).log(TERMINATING_WORK);
closeWindow();
return;
}
if (terminalState == WorkTerminalState.CONSUMER_ERROR) {
assertCondition(hasConsumerError(), terminalState);
final Throwable e = consumerError.get();
withPendingKey(logger.atWarning()).log(e.getMessage(), e);
closeWindow(e);
return;
}
if (terminalState == WorkTerminalState.TIMED_OUT) {
assertCondition(hasTimedOut(), terminalState);
final TimeoutReason reason = timeoutReason.get();
final Throwable e = reason.getError();
if (e != null) {
withPendingKey(logger.atWarning())
.addKeyValue("reason", reason.getMessage())
.log(TERMINATING_WORK, e);
closeWindow(e);
} else {
withPendingKey(logger.atVerbose())
.addKeyValue("reason", reason.getMessage())
.log(TERMINATING_WORK);
closeWindow();
}
return;
}
if (terminalState == WorkTerminalState.PARENT_TERMINAL) {
assertCondition(parent.isDoneOrCanceled(), terminalState);
final Throwable e = parent.getTerminalError();
withPendingKey(logger.atWarning()).log(e.getMessage(), e);
closeWindow(e);
return;
}
if (terminalState == WorkTerminalState.PARENT_TERMINAL_CLEAN_CLOSE) {
assertCondition(parent.isDoneOrCanceled() && isStreaming(), terminalState);
withPendingKey(logger.atWarning())
.addKeyValue("reason", "terminal-clean-close")
.log(TERMINATING_WORK);
closeWindow();
return;
}
}
throw logger.atError().log(new IllegalStateException("Unknown work terminal state." + terminalState));
} | class WindowWork<T> {
private static final String DEMAND_KEY = "demand";
private static final String PENDING_KEY = "pending";
public static final String SIGNAL_TYPE_KEY = "signalType";
public static final String EMIT_RESULT_KEY = "emitResult";
private static final String TERMINATING_WORK = "Terminating the work.";
private final AtomicBoolean isInitialized = new AtomicBoolean(false);
private final AtomicBoolean isCanceled = new AtomicBoolean(false);
private final AtomicBoolean isTerminated = new AtomicBoolean(false);
private final AtomicReference<TimeoutReason> timeoutReason = new AtomicReference<>(null);
private final AtomicReference<Throwable> consumerError = new AtomicReference<>(null);
private final ClientLogger logger;
private final WindowedSubscriber<T> parent;
private final int demand;
private final Duration timeout;
private final Sinks.Many<T> sink;
private final AtomicInteger pending;
private final Disposable.Composite timers;
/**
* Create a work to produce a window of items.
*
* @param parent the parent subscriber that deliveries items for the window.
* @param id an identifier for the work.
* @param demand the upper bound for the number of items to include in the window.
* @param timeout the maximum {@link Duration} since the window was opened before closing it.
*/
private WindowWork(WindowedSubscriber<T> parent, long id, int demand, Duration timeout) {
this.logger = createLogger(parent.loggingContext, id, demand);
this.parent = parent;
this.demand = demand;
this.pending = new AtomicInteger(demand);
this.timeout = timeout;
this.sink = createSink();
this.timers = Disposables.composite();
}
/**
* Check if the window was canceled from "outside WindowedSubscriber".
*
* @see WindowWork
*
* @return true if the window was canceled externally.
*/
boolean isCanceled() {
return isCanceled.get();
}
/**
* Check if the window has received the number of items it demanded.
*
* @return true if demanded number of items are received.
*/
boolean hasReceivedDemanded() {
return pending.get() <= 0;
}
/**
* Check if the window has timeout or there was a failure while scheduling or waiting for timeout.
*
* @see TimeoutReason
*
* @return true if the window has timed out or there was a failure while scheduling or waiting for timeout.
*/
boolean hasTimedOut() {
return timeoutReason.get() != null;
}
/**
* The number of items so far received by the window.
*
* @return the number of items received by the window.
*/
int getPending() {
return pending.get();
}
/**
* The desired number of items to include in the window.
*
* @return the demanded window size.
*/
private long getDemand() {
return demand;
}
/**
* Check if consumer unexpectedly thrown an error while handling an item in the window.
*
* @return true if consumer thrown an error while handling an item.
*/
private boolean hasConsumerError() {
return consumerError.get() != null;
}
/**
* Check if the window is in 'streaming state'.
* <p>
* The window is called 'streaming window' or in 'streaming state', if it has sent at least one item but still
* needs more upstream items to meet its demand.
* </p>
*
* @see WindowedSubscriberOptions
*
* @return true if the window is in 'streaming state', false otherwise.
*/
private boolean isStreaming() {
final int pending = getPending();
return pending > 0 && pending < demand;
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Perform one time initialization of the work to open its window.
* </p>
* @return true if the work is initialized for the first time; false, if it is already initialized.
*/
private boolean init() {
if (isInitialized.getAndSet(true)) {
return false;
}
this.timers.add(beginTimeoutTimer());
this.timers.add(beginNextItemTimeoutTimer());
return true;
}
/**
* Get the flux that streams the window events (items and termination) to it's downstream.
* <p>
* The downstream is the {@link IterableStream} that users uses to consume the window events synchronously.
* </p>
* <p>
* The {@code drainOnCancel} enables registering for a drain loop run when the window termination gets triggered
* from "outside WindowedSubscriber", which will be the case if window flux gets canceled. In all cases
* other than cancel, the window termination (completion, error) is triggered from "within the WindowedSubscriber".
* The WindowedSubscriber needs to control or to be aware of the window termination, so that it can pick work for
* the next window.
* </p>
*
* @param drainOnCancel true if the drain loop needs to be run when the flux is canceled.
* @return the flux streaming window events.
*/
private Flux<T> windowFlux(boolean drainOnCancel) {
final Function<Flux<T>, Flux<T>> decorator = parent.windowDecorator;
final Flux<T> flux = decorator != null ? decorator.apply(sink.asFlux()) : sink.asFlux();
if (drainOnCancel) {
return flux
.doFinally(s -> {
if (s == SignalType.CANCEL) {
isCanceled.set(true);
final WindowWork<T> w = this;
Schedulers.boundedElastic().schedule(() -> parent.postTimedOutOrCanceledWork(w));
}
});
} else {
return flux;
}
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Attempt to deliver the next item to the work's window.
* </p>
* @param item the item to emit.
* @return the result of the emission attempt.
*/
private EmitNextResult tryEmitNext(T item) {
final int c = pending.getAndDecrement();
if (c <= 0) {
if (c < 0) {
withPendingKey(logger.atWarning()).log("Unexpected emit-next attempt when no more demand.");
}
return EmitNextResult.RECEIVED_DEMANDED;
}
final Sinks.EmitResult emitResult;
try {
emitResult = sink.tryEmitNext(item);
} catch (Throwable e) {
consumerError.set(e);
withPendingKey(logger.atError()).log("Unexpected consumer error occurred while emitting.", e);
return EmitNextResult.CONSUMER_ERROR;
}
if (emitResult == Sinks.EmitResult.OK) {
return EmitNextResult.OK;
} else {
withPendingKey(logger.atError()).addKeyValue(EMIT_RESULT_KEY, emitResult)
.log("Could not emit-next.");
return EmitNextResult.SINK_ERROR;
}
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Terminate the work to close the window it represents.
* </p>
* @param terminalState the terminal state of the work.
*/
/**
* Starts a timer that upon expiration trigger the window close signal.
*
* @return {@link Disposable} to cancel the timer.
*/
private Disposable beginTimeoutTimer() {
final Disposable disposable = Mono.delay(timeout)
.publishOn(Schedulers.boundedElastic())
.subscribe(__ -> onTimeout(TimeoutReason.TIMEOUT),
e -> onTimeout(TimeoutReason.timeoutErrored(e)));
return disposable;
}
/**
* Start the timer to trigger the window close signal if the next item does not arrive within timeout.
*
* @return {@link Disposable} to cancel the timer.
*/
private Disposable beginNextItemTimeoutTimer() {
final Duration nextItemTimout = parent.nextItemTimout;
if (nextItemTimout == null) {
return Disposables.disposed();
}
final Flux<Mono<Long>> nextItemTimer = sink.asFlux().map(__ -> Mono.delay(nextItemTimout));
final Disposable disposable = Flux.switchOnNext(nextItemTimer)
.publishOn(Schedulers.boundedElastic())
.subscribe(__ -> onTimeout(TimeoutReason.TIMEOUT_NEXT_ITEM),
e -> onTimeout(TimeoutReason.timeoutNextItemErrored(e)));
return disposable;
}
/**
 * Signals the window close by timeout; only the first timeout signal wins, later ones are ignored.
 *
 * @param reason the timeout reason.
 */
private void onTimeout(TimeoutReason reason) {
    if (timeoutReason.compareAndSet(null, reason)) {
        parent.postTimedOutOrCanceledWork(this);
    }
}
/**
 * Asserts that the precondition for the given terminal state holds.
 *
 * @param condition the condition that must hold.
 * @param terminalState the work terminal state being applied.
 *
 * @throws IllegalStateException if the condition is not met.
 */
private void assertCondition(boolean condition, WorkTerminalState terminalState) {
    if (!condition) {
        final String message = String.format("Illegal invocation of terminate(%s).", terminalState);
        throw withPendingKey(logger.atError()).log(new IllegalStateException(message));
    }
}
/**
 * CONTRACT: The call site must originate from serialized drain-loop i.e.,
 * drainLoop() -> terminate(terminalState) -> closeWindow().
 * <p>
 * Attempts successful (complete) closure of the window; an emission failure is logged and never retried.
 */
private void closeWindow() {
    final Sinks.EmitFailureHandler logAndGiveUp = (signalType, emitResult) -> {
        logger.atError()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .addKeyValue(EMIT_RESULT_KEY, emitResult)
            .log("Could not close window.");
        return false; // do not retry the emission
    };
    sink.emitComplete(logAndGiveUp);
}
/**
 * CONTRACT: The call site must originate from serialized drain-loop i.e.,
 * drainLoop() -> terminate(terminalState) -> closeWindow(e).
 * <p>
 * Attempt to close the window with error. An emission failure is logged and never retried.
 *
 * @param e the error to close the window with.
 */
private void closeWindow(Throwable e) {
    sink.emitError(e, (signalType, emitResult) -> {
        logger.atError()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .addKeyValue(EMIT_RESULT_KEY, emitResult)
            // Fixed grammar of the log message ("Could not closed" -> "Could not close").
            .log("Could not close window with error.");
        return false;
    });
}
/**
 * Annotates the given logging event builder with the current number of items still pending to meet the
 * window demand (logged under the key {@code pending}).
 *
 * @param logger the logging event builder to annotate.
 * @return the same builder with the pending-count key added.
 */
private LoggingEventBuilder withPendingKey(LoggingEventBuilder logger) {
    return logger.addKeyValue(PENDING_KEY, pending.get());
}
/**
 * Gets a verbose-level logging event builder annotated with the pending count
 * (used by the parent subscriber when initializing this work).
 *
 * @return the annotated logging event builder.
 */
private LoggingEventBuilder getLogger() {
    return withPendingKey(logger.atVerbose());
}
/**
 * Creates the per-work logger: inherits the parent's logging context and adds this work's identifier
 * and window demand.
 *
 * @param parentLogContext the parent logging context.
 * @param id the identifier for the work.
 * @param demand the demand for the window this work represents.
 * @return the logger for the work.
 */
private static ClientLogger createLogger(Map<String, Object> parentLogContext, long id, int demand) {
    final Map<String, Object> loggingContext = new HashMap<>(parentLogContext);
    loggingContext.put(WORK_ID_KEY, id);
    loggingContext.put(DEMAND_KEY, demand);
    return new ClientLogger(WindowWork.class, loggingContext);
}
/**
 * Creates the sink used to signal window events (items and termination).
 * <p>
 * Events signaled to this sink — item emission (tryEmitNext) and termination (emitComplete / emitError) —
 * are serialized by the parent's drain-loop.
 * </p>
 * <p>
 * There will be two subscribers to this sink,
 * <ul>
 * <li>subscription for next item timeout.</li>
 * <li>subscription from IterableStream.</li>
 * </ul>
 * to support this multi-subscription broadcasting use case, the sink is replay-able.
 * </p>
 * <p>
 * This sink has an internal unbounded queue acting as a buffer between the drain-loop and the consumer of the
 * window. This allows the drain-loop to enqueue the window items as fast as it can and move on to processing
 * the next window work. The window size will be the cap for the number of items that gets enqueued.
 * </p>
 *
 * @return the sink to signal window events (items and termination).
 * @param <T> type of items in the sink.
 */
private static <T> Sinks.Many<T> createSink() {
    return Sinks.many().replay().all();
}
/**
 * A type describing a successful completion of timeout or an error while scheduling or waiting for timeout.
 */
private static final class TimeoutReason {
    // Shared reason: the overall window timeout elapsed.
    static final TimeoutReason TIMEOUT = new TimeoutReason("Timeout occurred.", null);
    // Shared reason: the timeout between two consecutive items elapsed.
    static final TimeoutReason TIMEOUT_NEXT_ITEM = new TimeoutReason("Timeout between the messages occurred.", null);
    // Human-readable description of why the timeout fired (or failed to fire).
    private final String message;
    // Non-null only when scheduling or waiting for the timeout itself failed.
    private final Throwable error;
    /**
     * Create reason describing the error while scheduling or waiting for timeout.
     *
     * @param error indicates anything internal to Reactor failing timeout attempt. E.g., error in scheduling
     * timeout task when internal Scheduler throws {@link RejectedExecutionException}.
     * @return the reason.
     */
    static TimeoutReason timeoutErrored(Throwable error) {
        return new TimeoutReason("Error while scheduling or waiting for timeout.", error);
    }
    /**
     * Create reason describing the error while scheduling or waiting for timeout between items.
     *
     * @param error indicates anything internal to Reactor failing timeout attempt. E.g., error in scheduling
     * timeout task when internal Scheduler throws {@link RejectedExecutionException}.
     * @return the reason.
     */
    static TimeoutReason timeoutNextItemErrored(Throwable error) {
        return new TimeoutReason("Error while scheduling or waiting for timeout between the messages.", error);
    }
    private TimeoutReason(String message, Throwable error) {
        this.message = message;
        this.error = error;
    }
    String getMessage() {
        return message;
    }
    Throwable getError() {
        return error;
    }
}
} | class WindowWork<T> {
// Logging keys and the shared termination log message.
private static final String DEMAND_KEY = "demand";
private static final String PENDING_KEY = "pending";
public static final String SIGNAL_TYPE_KEY = "signalType";
public static final String EMIT_RESULT_KEY = "emitResult";
private static final String TERMINATING_WORK = "Terminating the work.";
// One-time guards: timers start once, external cancel is observed once, terminate runs once.
private final AtomicBoolean isInitialized = new AtomicBoolean(false);
private final AtomicBoolean isCanceled = new AtomicBoolean(false);
private final AtomicBoolean isTerminated = new AtomicBoolean(false);
// First timeout signal wins; set via compareAndSet in onTimeout.
private final AtomicReference<TimeoutReason> timeoutReason = new AtomicReference<>(null);
// Error thrown by the window consumer while an item was being emitted, if any.
private final AtomicReference<Throwable> consumerError = new AtomicReference<>(null);
private final ClientLogger logger;
private final WindowedSubscriber<T> parent;
// Upper bound on the number of items to include in the window.
private final int demand;
// Maximum duration the window stays open.
private final Duration timeout;
// Sink the drain-loop emits window items and termination into.
private final Sinks.Many<T> sink;
// Items still required to meet the demand; counts down from 'demand'.
private final AtomicInteger pending;
// Timers (overall timeout and next-item timeout) disposed when the work terminates.
private final Disposable.Composite timers;
/**
 * Create a work to produce a window of items.
 *
 * @param parent the parent subscriber that delivers items for the window.
 * @param id an identifier for the work.
 * @param demand the upper bound for the number of items to include in the window.
 * @param timeout the maximum {@link Duration} since the window was opened before closing it.
 */
private WindowWork(WindowedSubscriber<T> parent, long id, int demand, Duration timeout) {
    this.logger = createLogger(parent.loggingContext, id, demand);
    this.parent = parent;
    this.demand = demand;
    this.pending = new AtomicInteger(demand);
    this.timeout = timeout;
    this.sink = createSink();
    this.timers = Disposables.composite();
}
/**
 * Check if the window was canceled from "outside WindowedSubscriber" (i.e., the window flux
 * subscription was canceled by its consumer).
 *
 * @see WindowWork
 *
 * @return true if the window was canceled externally.
 */
boolean isCanceled() {
    return isCanceled.get();
}
/**
 * Check if the window has received the number of items it demanded.
 *
 * @return true if the demanded number of items were received.
 */
boolean hasReceivedDemanded() {
    return pending.get() <= 0;
}
/**
 * Check if the window has timed out or there was a failure while scheduling or waiting for timeout.
 *
 * @see TimeoutReason
 *
 * @return true if the window has timed out or there was a failure while scheduling or waiting for timeout.
 */
boolean hasTimedOut() {
    return timeoutReason.get() != null;
}
/**
 * The number of items still pending to meet the window demand (counts down from the demand as items
 * are received).
 *
 * @return the number of items still pending.
 */
int getPending() {
    return pending.get();
}
/**
 * The desired number of items to include in the window.
 *
 * @return the demanded window size.
 */
private long getDemand() {
    return demand;
}
/**
 * Check if the consumer unexpectedly threw an error while handling an item in the window.
 *
 * @return true if the consumer threw an error while handling an item.
 */
private boolean hasConsumerError() {
    return consumerError.get() != null;
}
/**
 * Check if the window is in 'streaming state'.
 * <p>
 * The window is called 'streaming window' or in 'streaming state', if it has sent at least one item but still
 * needs more upstream items to meet its demand.
 * </p>
 *
 * @see WindowedSubscriberOptions
 *
 * @return true if the window is in 'streaming state', false otherwise.
 */
private boolean isStreaming() {
    final int pending = getPending();
    return pending > 0 && pending < demand;
}
/**
 * CONTRACT: Never invoke from the outside of serialized drain-loop.
 * <p>
 * Performs one-time initialization of the work (starts the timeout timers) to open its window.
 * </p>
 * @return true if the work is initialized for the first time; false, if it is already initialized.
 */
private boolean init() {
    // compareAndSet guarantees the timers are started exactly once.
    if (!isInitialized.compareAndSet(false, true)) {
        return false;
    }
    timers.add(beginTimeoutTimer());
    timers.add(beginNextItemTimeoutTimer());
    return true;
}
/**
 * Get the flux that streams the window events (items and termination) to its downstream.
 * <p>
 * The downstream is the {@link IterableStream} that users use to consume the window events synchronously.
 * </p>
 * <p>
 * The {@code drainOnCancel} enables registering for a drain loop run when the window termination gets triggered
 * from "outside WindowedSubscriber", which will be the case if the window flux gets canceled. In all cases
 * other than cancel, the window termination (completion, error) is triggered from "within the WindowedSubscriber".
 * The WindowedSubscriber needs to control or to be aware of the window termination, so that it can pick work for
 * the next window.
 * </p>
 *
 * @param drainOnCancel true if the drain loop needs to be run when the flux is canceled.
 * @return the flux streaming window events.
 */
private Flux<T> windowFlux(boolean drainOnCancel) {
    // Optionally let the application-supplied decorator wrap the raw window flux.
    final Function<Flux<T>, Flux<T>> decorator = parent.windowDecorator;
    final Flux<T> flux = decorator != null ? decorator.apply(sink.asFlux()) : sink.asFlux();
    if (drainOnCancel) {
        return flux
            .doFinally(s -> {
                if (s == SignalType.CANCEL) {
                    // Record the external cancellation first, then hand this work back to the
                    // parent on a separate thread so its drain-loop can run and pick next work.
                    isCanceled.set(true);
                    final WindowWork<T> w = this;
                    Schedulers.boundedElastic().schedule(() -> parent.postTimedOutOrCanceledWork(w));
                }
            });
    } else {
        return flux;
    }
}
/**
 * CONTRACT: Never invoke from the outside of serialized drain-loop.
 * <p>
 * Attempt to deliver the next item to the work's window.
 * </p>
 * @param item the item to emit.
 * @return the result of the emission attempt.
 */
private EmitNextResult tryEmitNext(T item) {
    // Claim one unit of demand up front; c is the demand remaining before this claim.
    final int c = pending.getAndDecrement();
    if (c <= 0) {
        if (c < 0) {
            withPendingKey(logger.atWarning()).log("Unexpected emit-next attempt when no more demand.");
        }
        return EmitNextResult.RECEIVED_DEMANDED;
    }
    // NOTE(review): the demand claimed above is not restored if the emission below fails — confirm intended.
    final Sinks.EmitResult emitResult;
    try {
        emitResult = sink.tryEmitNext(item);
    } catch (Throwable e) {
        // An exception here is treated as thrown by the window consumer (tracked in consumerError).
        consumerError.set(e);
        withPendingKey(logger.atError()).log("Unexpected consumer error occurred while emitting.", e);
        return EmitNextResult.CONSUMER_ERROR;
    }
    if (emitResult == Sinks.EmitResult.OK) {
        return EmitNextResult.OK;
    } else {
        withPendingKey(logger.atError()).addKeyValue(EMIT_RESULT_KEY, emitResult)
            .log("Could not emit-next.");
        return EmitNextResult.SINK_ERROR;
    }
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Terminate the work to close the window it represents.
* </p>
* @param terminalState the terminal state of the work.
*/
/**
 * Starts a one-shot timer that signals the window close once the overall window timeout expires.
 *
 * @return {@link Disposable} to cancel the timer.
 */
private Disposable beginTimeoutTimer() {
    return Mono.delay(timeout)
        .publishOn(Schedulers.boundedElastic())
        .subscribe(tick -> onTimeout(TimeoutReason.TIMEOUT),
            error -> onTimeout(TimeoutReason.timeoutErrored(error)));
}
/**
 * Starts the timer that signals the window close if the next item does not arrive within the configured
 * per-item timeout. The timer restarts on every item emitted to the window.
 *
 * @return {@link Disposable} to cancel the timer, or an already-disposed handle when no per-item timeout
 * is configured.
 */
private Disposable beginNextItemTimeoutTimer() {
    final Duration itemGapTimeout = parent.nextItemTimout;
    if (itemGapTimeout == null) {
        return Disposables.disposed();
    }
    // Every item mapped to a fresh delay; switchOnNext cancels the previous delay, restarting the gap timer.
    return Flux.switchOnNext(sink.asFlux().map(item -> Mono.delay(itemGapTimeout)))
        .publishOn(Schedulers.boundedElastic())
        .subscribe(tick -> onTimeout(TimeoutReason.TIMEOUT_NEXT_ITEM),
            error -> onTimeout(TimeoutReason.timeoutNextItemErrored(error)));
}
/**
 * Signals the window close by timeout; only the first timeout signal wins, later ones are ignored.
 *
 * @param reason the timeout reason.
 */
private void onTimeout(TimeoutReason reason) {
    if (timeoutReason.compareAndSet(null, reason)) {
        parent.postTimedOutOrCanceledWork(this);
    }
}
/**
 * Asserts that the precondition for the given terminal state holds.
 *
 * @param condition the condition that must hold.
 * @param terminalState the work terminal state being applied.
 *
 * @throws IllegalStateException if the condition is not met.
 */
private void assertCondition(boolean condition, WorkTerminalState terminalState) {
    if (!condition) {
        final String message = String.format("Illegal invocation of terminate(%s).", terminalState);
        throw withPendingKey(logger.atError()).log(new IllegalStateException(message));
    }
}
/**
 * CONTRACT: The call site must originate from serialized drain-loop i.e.,
 * drainLoop() -> terminate(terminalState) -> closeWindow().
 * <p>
 * Attempts successful (complete) closure of the window; an emission failure is logged and never retried.
 */
private void closeWindow() {
    final Sinks.EmitFailureHandler logAndGiveUp = (signalType, emitResult) -> {
        logger.atError()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .addKeyValue(EMIT_RESULT_KEY, emitResult)
            .log("Could not close window.");
        return false; // do not retry the emission
    };
    sink.emitComplete(logAndGiveUp);
}
/**
 * CONTRACT: The call site must originate from serialized drain-loop i.e.,
 * drainLoop() -> terminate(terminalState) -> closeWindow(e).
 * <p>
 * Attempt to close the window with error. An emission failure is logged and never retried.
 *
 * @param e the error to close the window with.
 */
private void closeWindow(Throwable e) {
    sink.emitError(e, (signalType, emitResult) -> {
        logger.atError()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .addKeyValue(EMIT_RESULT_KEY, emitResult)
            // Fixed grammar of the log message ("Could not closed" -> "Could not close").
            .log("Could not close window with error.");
        return false;
    });
}
/**
* Annotate the logger with current number of items pending to meet the window demand.
*
* @param logger the logger to annotate.
* @return the annotated logger.
*/
private LoggingEventBuilder withPendingKey(LoggingEventBuilder logger) {
return logger.addKeyValue(PENDING_KEY, pending.get());
}
/**
* Gets the logger (used by parent subscriber when initializing this work).
*
* @return the logger.
*/
private LoggingEventBuilder getLogger() {
return withPendingKey(logger.atVerbose());
}
/**
* Creates the logger for the work.
*
* @param parentLogContext the parent logging context.
* @param id the identifier for the work.
* @param demand the demand for the window this work represents.
* @return the logger for the work.
*/
private static ClientLogger createLogger(Map<String, Object> parentLogContext, long id, int demand) {
final Map<String, Object> loggingContext = new HashMap<>(parentLogContext.size() + 5);
loggingContext.putAll(parentLogContext);
loggingContext.put(WORK_ID_KEY, id);
loggingContext.put(DEMAND_KEY, demand);
return new ClientLogger(WindowWork.class, loggingContext);
}
/**
* Creates the sink to signal window events.
* <p>
* Events signaling (items, termination) to this sink will be serialized by the parent's drain-loop,
* <ul>
* <li>items signaling {@link WindowWork
* <li>termination signaling {@link WindowWork
* </ul>
* </p>
* <p>
* There will be two subscribers to this sink,
* <ul>
* <li>subscription for next item timeout.</li>
* <li>subscription from IterableStream.</li>
* </ul>
* to support this multi-subscription broadcasting use case, the sink is replay-able.
* </p>
* <p>
* This sink has an internal unbounded queue acting as a buffer between the drain-loop and the consumer of the
* window. This allows drain-loop to enqueue the window items as fast as it could and move to processing next
* window work. The window size will be the cap for the number of items that gets enqueued.
* </p>
*
* @return the sink to signal window events (items and termination).
* @param <T> type of items in the sink.
*/
private static <T> Sinks.Many<T> createSink() {
return Sinks.many().replay().all();
}
/**
* A type describing a successful completion of timeout or an error while scheduling or waiting for timeout.
*/
private static final class TimeoutReason {
static final TimeoutReason TIMEOUT = new TimeoutReason("Timeout occurred.", null);
static final TimeoutReason TIMEOUT_NEXT_ITEM = new TimeoutReason("Timeout between the messages occurred.", null);
private final String message;
private final Throwable error;
/**
* Create reason describing the error while scheduling or waiting for timeout.
*
* @param error indicates anything internal to Reactor failing timeout attempt. E.g., error in scheduling
* timeout task when internal Scheduler throws {@link RejectedExecutionException}.
* @return the reason.
*/
static TimeoutReason timeoutErrored(Throwable error) {
return new TimeoutReason("Error while scheduling or waiting for timeout.", error);
}
/**
* Create reason describing the error while scheduling or waiting for timeout between items.
*
* @param error indicates anything internal to Reactor failing timeout attempt. E.g., error in scheduling
* timeout task when internal Scheduler throws {@link RejectedExecutionException}.
* @return the reason.
*/
static TimeoutReason timeoutNextItemErrored(Throwable error) {
return new TimeoutReason("Error while scheduling or waiting for timeout between the messages.", error);
}
private TimeoutReason(String message, Throwable error) {
this.message = message;
this.error = error;
}
String getMessage() {
return message;
}
Throwable getError() {
return error;
}
}
} |
nice, thanks Liudmila! lgtm | private void terminate(WorkTerminalState terminalState) {
if (isTerminated.getAndSet(true)) {
return;
}
try {
timers.dispose();
} finally {
if (terminalState == WorkTerminalState.SINK_ERROR) {
withPendingKey(logger.atWarning())
.addKeyValue("reason", "sink-error")
.log(TERMINATING_WORK);
return;
}
if (terminalState == WorkTerminalState.CANCELED) {
assertCondition(isCanceled(), terminalState);
withPendingKey(logger.atWarning())
.addKeyValue("reason", "sink-canceled")
.log(TERMINATING_WORK);
return;
}
if (terminalState == WorkTerminalState.RECEIVED_DEMANDED) {
assertCondition(hasReceivedDemanded(), terminalState);
withPendingKey(logger.atVerbose()).log(TERMINATING_WORK);
closeWindow();
return;
}
if (terminalState == WorkTerminalState.CONSUMER_ERROR) {
assertCondition(hasConsumerError(), terminalState);
final Throwable e = consumerError.get();
withPendingKey(logger.atWarning()).log(e.getMessage(), e);
closeWindow(e);
return;
}
if (terminalState == WorkTerminalState.TIMED_OUT) {
assertCondition(hasTimedOut(), terminalState);
final TimeoutReason reason = timeoutReason.get();
final Throwable e = reason.getError();
if (e != null) {
withPendingKey(logger.atWarning())
.addKeyValue("reason", reason.getMessage())
.log(TERMINATING_WORK, e);
closeWindow(e);
} else {
withPendingKey(logger.atVerbose())
.addKeyValue("reason", reason.getMessage())
.log(TERMINATING_WORK);
closeWindow();
}
return;
}
if (terminalState == WorkTerminalState.PARENT_TERMINAL) {
assertCondition(parent.isDoneOrCanceled(), terminalState);
final Throwable e = parent.getTerminalError();
withPendingKey(logger.atWarning()).log(e.getMessage(), e);
closeWindow(e);
return;
}
if (terminalState == WorkTerminalState.PARENT_TERMINAL_CLEAN_CLOSE) {
assertCondition(parent.isDoneOrCanceled() && isStreaming(), terminalState);
withPendingKey(logger.atWarning())
.addKeyValue("reason", "terminal-clean-close")
.log(TERMINATING_WORK);
closeWindow();
return;
}
}
throw logger.atError().log(new IllegalStateException("Unknown work terminal state." + terminalState));
} | .log(TERMINATING_WORK); | private void terminate(WorkTerminalState terminalState) {
if (isTerminated.getAndSet(true)) {
return;
}
try {
timers.dispose();
} finally {
if (terminalState == WorkTerminalState.SINK_ERROR) {
withPendingKey(logger.atWarning())
.addKeyValue("reason", "sink-error")
.log(TERMINATING_WORK);
return;
}
if (terminalState == WorkTerminalState.CANCELED) {
assertCondition(isCanceled(), terminalState);
withPendingKey(logger.atWarning())
.addKeyValue("reason", "sink-canceled")
.log(TERMINATING_WORK);
return;
}
if (terminalState == WorkTerminalState.RECEIVED_DEMANDED) {
assertCondition(hasReceivedDemanded(), terminalState);
withPendingKey(logger.atVerbose()).log(TERMINATING_WORK);
closeWindow();
return;
}
if (terminalState == WorkTerminalState.CONSUMER_ERROR) {
assertCondition(hasConsumerError(), terminalState);
final Throwable e = consumerError.get();
withPendingKey(logger.atWarning()).log(e.getMessage(), e);
closeWindow(e);
return;
}
if (terminalState == WorkTerminalState.TIMED_OUT) {
assertCondition(hasTimedOut(), terminalState);
final TimeoutReason reason = timeoutReason.get();
final Throwable e = reason.getError();
if (e != null) {
withPendingKey(logger.atWarning())
.addKeyValue("reason", reason.getMessage())
.log(TERMINATING_WORK, e);
closeWindow(e);
} else {
withPendingKey(logger.atVerbose())
.addKeyValue("reason", reason.getMessage())
.log(TERMINATING_WORK);
closeWindow();
}
return;
}
if (terminalState == WorkTerminalState.PARENT_TERMINAL) {
assertCondition(parent.isDoneOrCanceled(), terminalState);
final Throwable e = parent.getTerminalError();
withPendingKey(logger.atWarning()).log(e.getMessage(), e);
closeWindow(e);
return;
}
if (terminalState == WorkTerminalState.PARENT_TERMINAL_CLEAN_CLOSE) {
assertCondition(parent.isDoneOrCanceled() && isStreaming(), terminalState);
withPendingKey(logger.atWarning())
.addKeyValue("reason", "terminal-clean-close")
.log(TERMINATING_WORK);
closeWindow();
return;
}
}
throw logger.atError().log(new IllegalStateException("Unknown work terminal state." + terminalState));
} | class WindowWork<T> {
private static final String DEMAND_KEY = "demand";
private static final String PENDING_KEY = "pending";
public static final String SIGNAL_TYPE_KEY = "signalType";
public static final String EMIT_RESULT_KEY = "emitResult";
private static final String TERMINATING_WORK = "Terminating the work.";
private final AtomicBoolean isInitialized = new AtomicBoolean(false);
private final AtomicBoolean isCanceled = new AtomicBoolean(false);
private final AtomicBoolean isTerminated = new AtomicBoolean(false);
private final AtomicReference<TimeoutReason> timeoutReason = new AtomicReference<>(null);
private final AtomicReference<Throwable> consumerError = new AtomicReference<>(null);
private final ClientLogger logger;
private final WindowedSubscriber<T> parent;
private final int demand;
private final Duration timeout;
private final Sinks.Many<T> sink;
private final AtomicInteger pending;
private final Disposable.Composite timers;
/**
* Create a work to produce a window of items.
*
* @param parent the parent subscriber that deliveries items for the window.
* @param id an identifier for the work.
* @param demand the upper bound for the number of items to include in the window.
* @param timeout the maximum {@link Duration} since the window was opened before closing it.
*/
private WindowWork(WindowedSubscriber<T> parent, long id, int demand, Duration timeout) {
this.logger = createLogger(parent.loggingContext, id, demand);
this.parent = parent;
this.demand = demand;
this.pending = new AtomicInteger(demand);
this.timeout = timeout;
this.sink = createSink();
this.timers = Disposables.composite();
}
/**
* Check if the window was canceled from "outside WindowedSubscriber".
*
* @see WindowWork
*
* @return true if the window was canceled externally.
*/
boolean isCanceled() {
return isCanceled.get();
}
/**
* Check if the window has received the number of items it demanded.
*
* @return true if demanded number of items are received.
*/
boolean hasReceivedDemanded() {
return pending.get() <= 0;
}
/**
* Check if the window has timeout or there was a failure while scheduling or waiting for timeout.
*
* @see TimeoutReason
*
* @return true if the window has timed out or there was a failure while scheduling or waiting for timeout.
*/
boolean hasTimedOut() {
return timeoutReason.get() != null;
}
/**
* The number of items so far received by the window.
*
* @return the number of items received by the window.
*/
int getPending() {
return pending.get();
}
/**
* The desired number of items to include in the window.
*
* @return the demanded window size.
*/
private long getDemand() {
return demand;
}
/**
* Check if consumer unexpectedly thrown an error while handling an item in the window.
*
* @return true if consumer thrown an error while handling an item.
*/
private boolean hasConsumerError() {
return consumerError.get() != null;
}
/**
* Check if the window is in 'streaming state'.
* <p>
* The window is called 'streaming window' or in 'streaming state', if it has sent at least one item but still
* needs more upstream items to meet its demand.
* </p>
*
* @see WindowedSubscriberOptions
*
* @return true if the window is in 'streaming state', false otherwise.
*/
private boolean isStreaming() {
final int pending = getPending();
return pending > 0 && pending < demand;
}
/**
 * CONTRACT: Never invoke from the outside of serialized drain-loop.
 * <p>
 * Performs one-time initialization of the work (starts the timeout timers) to open its window.
 * </p>
 * @return true if the work is initialized for the first time; false, if it is already initialized.
 */
private boolean init() {
    // compareAndSet guarantees the timers are started exactly once.
    if (!isInitialized.compareAndSet(false, true)) {
        return false;
    }
    timers.add(beginTimeoutTimer());
    timers.add(beginNextItemTimeoutTimer());
    return true;
}
/**
* Get the flux that streams the window events (items and termination) to it's downstream.
* <p>
* The downstream is the {@link IterableStream} that users uses to consume the window events synchronously.
* </p>
* <p>
* The {@code drainOnCancel} enables registering for a drain loop run when the window termination gets triggered
* from "outside WindowedSubscriber", which will be the case if window flux gets canceled. In all cases
* other than cancel, the window termination (completion, error) is triggered from "within the WindowedSubscriber".
* The WindowedSubscriber needs to control or to be aware of the window termination, so that it can pick work for
* the next window.
* </p>
*
* @param drainOnCancel true if the drain loop needs to be run when the flux is canceled.
* @return the flux streaming window events.
*/
private Flux<T> windowFlux(boolean drainOnCancel) {
final Function<Flux<T>, Flux<T>> decorator = parent.windowDecorator;
final Flux<T> flux = decorator != null ? decorator.apply(sink.asFlux()) : sink.asFlux();
if (drainOnCancel) {
return flux
.doFinally(s -> {
if (s == SignalType.CANCEL) {
isCanceled.set(true);
final WindowWork<T> w = this;
Schedulers.boundedElastic().schedule(() -> parent.postTimedOutOrCanceledWork(w));
}
});
} else {
return flux;
}
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Attempt to deliver the next item to the work's window.
* </p>
* @param item the item to emit.
* @return the result of the emission attempt.
*/
private EmitNextResult tryEmitNext(T item) {
final int c = pending.getAndDecrement();
if (c <= 0) {
if (c < 0) {
withPendingKey(logger.atWarning()).log("Unexpected emit-next attempt when no more demand.");
}
return EmitNextResult.RECEIVED_DEMANDED;
}
final Sinks.EmitResult emitResult;
try {
emitResult = sink.tryEmitNext(item);
} catch (Throwable e) {
consumerError.set(e);
withPendingKey(logger.atError()).log("Unexpected consumer error occurred while emitting.", e);
return EmitNextResult.CONSUMER_ERROR;
}
if (emitResult == Sinks.EmitResult.OK) {
return EmitNextResult.OK;
} else {
withPendingKey(logger.atError()).addKeyValue(EMIT_RESULT_KEY, emitResult)
.log("Could not emit-next.");
return EmitNextResult.SINK_ERROR;
}
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Terminate the work to close the window it represents.
* </p>
* @param terminalState the terminal state of the work.
*/
/**
* Starts a timer that upon expiration trigger the window close signal.
*
* @return {@link Disposable} to cancel the timer.
*/
private Disposable beginTimeoutTimer() {
final Disposable disposable = Mono.delay(timeout)
.publishOn(Schedulers.boundedElastic())
.subscribe(__ -> onTimeout(TimeoutReason.TIMEOUT),
e -> onTimeout(TimeoutReason.timeoutErrored(e)));
return disposable;
}
/**
* Start the timer to trigger the window close signal if the next item does not arrive within timeout.
*
* @return {@link Disposable} to cancel the timer.
*/
private Disposable beginNextItemTimeoutTimer() {
final Duration nextItemTimout = parent.nextItemTimout;
if (nextItemTimout == null) {
return Disposables.disposed();
}
final Flux<Mono<Long>> nextItemTimer = sink.asFlux().map(__ -> Mono.delay(nextItemTimout));
final Disposable disposable = Flux.switchOnNext(nextItemTimer)
.publishOn(Schedulers.boundedElastic())
.subscribe(__ -> onTimeout(TimeoutReason.TIMEOUT_NEXT_ITEM),
e -> onTimeout(TimeoutReason.timeoutNextItemErrored(e)));
return disposable;
}
/**
 * Signals the window close by timeout; only the first timeout signal wins, later ones are ignored.
 *
 * @param reason the timeout reason.
 */
private void onTimeout(TimeoutReason reason) {
    if (timeoutReason.compareAndSet(null, reason)) {
        parent.postTimedOutOrCanceledWork(this);
    }
}
/**
 * Asserts that the precondition for the given terminal state holds.
 *
 * @param condition the condition that must hold.
 * @param terminalState the work terminal state being applied.
 *
 * @throws IllegalStateException if the condition is not met.
 */
private void assertCondition(boolean condition, WorkTerminalState terminalState) {
    if (!condition) {
        final String message = String.format("Illegal invocation of terminate(%s).", terminalState);
        throw withPendingKey(logger.atError()).log(new IllegalStateException(message));
    }
}
/**
 * CONTRACT: The call site must originate from serialized drain-loop i.e.,
 * drainLoop() -> terminate(terminalState) -> closeWindow().
 * <p>
 * Attempts successful (complete) closure of the window; an emission failure is logged and never retried.
 */
private void closeWindow() {
    final Sinks.EmitFailureHandler logAndGiveUp = (signalType, emitResult) -> {
        logger.atError()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .addKeyValue(EMIT_RESULT_KEY, emitResult)
            .log("Could not close window.");
        return false; // do not retry the emission
    };
    sink.emitComplete(logAndGiveUp);
}
/**
 * CONTRACT: The call site must originate from serialized drain-loop i.e.,
 * drainLoop() -> terminate(terminalState) -> closeWindow(e).
 * <p>
 * Attempt to close the window with error. An emission failure is logged and never retried.
 *
 * @param e the error to close the window with.
 */
private void closeWindow(Throwable e) {
    sink.emitError(e, (signalType, emitResult) -> {
        logger.atError()
            .addKeyValue(SIGNAL_TYPE_KEY, signalType)
            .addKeyValue(EMIT_RESULT_KEY, emitResult)
            // Fixed grammar of the log message ("Could not closed" -> "Could not close").
            .log("Could not close window with error.");
        return false;
    });
}
/**
* Annotate the logger with current number of items pending to meet the window demand.
*
* @param logger the logger to annotate.
* @return the annotated logger.
*/
private LoggingEventBuilder withPendingKey(LoggingEventBuilder logger) {
return logger.addKeyValue(PENDING_KEY, pending.get());
}
/**
* Gets the logger (used by parent subscriber when initializing this work).
*
* @return the logger.
*/
private LoggingEventBuilder getLogger() {
return withPendingKey(logger.atVerbose());
}
/**
* Creates the logger for the work.
*
* @param parentLogContext the parent logging context.
* @param id the identifier for the work.
* @param demand the demand for the window this work represents.
* @return the logger for the work.
*/
private static ClientLogger createLogger(Map<String, Object> parentLogContext, long id, int demand) {
final Map<String, Object> loggingContext = new HashMap<>(parentLogContext.size() + 5);
loggingContext.putAll(parentLogContext);
loggingContext.put(WORK_ID_KEY, id);
loggingContext.put(DEMAND_KEY, demand);
return new ClientLogger(WindowWork.class, loggingContext);
}
/**
* Creates the sink to signal window events.
* <p>
* Events signaling (items, termination) to this sink will be serialized by the parent's drain-loop,
* <ul>
* <li>items signaling {@link WindowWork
* <li>termination signaling {@link WindowWork
* </ul>
* </p>
* <p>
* There will be two subscribers to this sink,
* <ul>
* <li>subscription for next item timeout.</li>
* <li>subscription from IterableStream.</li>
* </ul>
* to support this multi-subscription broadcasting use case, the sink is replay-able.
* </p>
* <p>
* This sink has an internal unbounded queue acting as a buffer between the drain-loop and the consumer of the
* window. This allows drain-loop to enqueue the window items as fast as it could and move to processing next
* window work. The window size will be the cap for the number of items that gets enqueued.
* </p>
*
* @return the sink to signal window events (items and termination).
* @param <T> type of items in the sink.
*/
private static <T> Sinks.Many<T> createSink() {
return Sinks.many().replay().all();
}
/**
* A type describing a successful completion of timeout or an error while scheduling or waiting for timeout.
*/
private static final class TimeoutReason {
static final TimeoutReason TIMEOUT = new TimeoutReason("Timeout occurred.", null);
static final TimeoutReason TIMEOUT_NEXT_ITEM = new TimeoutReason("Timeout between the messages occurred.", null);
private final String message;
private final Throwable error;
/**
* Create reason describing the error while scheduling or waiting for timeout.
*
* @param error indicates anything internal to Reactor failing timeout attempt. E.g., error in scheduling
* timeout task when internal Scheduler throws {@link RejectedExecutionException}.
* @return the reason.
*/
static TimeoutReason timeoutErrored(Throwable error) {
return new TimeoutReason("Error while scheduling or waiting for timeout.", error);
}
/**
* Create reason describing the error while scheduling or waiting for timeout between items.
*
* @param error indicates anything internal to Reactor failing timeout attempt. E.g., error in scheduling
* timeout task when internal Scheduler throws {@link RejectedExecutionException}.
* @return the reason.
*/
static TimeoutReason timeoutNextItemErrored(Throwable error) {
return new TimeoutReason("Error while scheduling or waiting for timeout between the messages.", error);
}
private TimeoutReason(String message, Throwable error) {
this.message = message;
this.error = error;
}
String getMessage() {
return message;
}
Throwable getError() {
return error;
}
}
} | class WindowWork<T> {
private static final String DEMAND_KEY = "demand";
private static final String PENDING_KEY = "pending";
public static final String SIGNAL_TYPE_KEY = "signalType";
public static final String EMIT_RESULT_KEY = "emitResult";
private static final String TERMINATING_WORK = "Terminating the work.";
private final AtomicBoolean isInitialized = new AtomicBoolean(false);
private final AtomicBoolean isCanceled = new AtomicBoolean(false);
private final AtomicBoolean isTerminated = new AtomicBoolean(false);
private final AtomicReference<TimeoutReason> timeoutReason = new AtomicReference<>(null);
private final AtomicReference<Throwable> consumerError = new AtomicReference<>(null);
private final ClientLogger logger;
private final WindowedSubscriber<T> parent;
private final int demand;
private final Duration timeout;
private final Sinks.Many<T> sink;
private final AtomicInteger pending;
private final Disposable.Composite timers;
/**
* Create a work to produce a window of items.
*
* @param parent the parent subscriber that deliveries items for the window.
* @param id an identifier for the work.
* @param demand the upper bound for the number of items to include in the window.
* @param timeout the maximum {@link Duration} since the window was opened before closing it.
*/
private WindowWork(WindowedSubscriber<T> parent, long id, int demand, Duration timeout) {
this.logger = createLogger(parent.loggingContext, id, demand);
this.parent = parent;
this.demand = demand;
this.pending = new AtomicInteger(demand);
this.timeout = timeout;
this.sink = createSink();
this.timers = Disposables.composite();
}
/**
* Check if the window was canceled from "outside WindowedSubscriber".
*
* @see WindowWork
*
* @return true if the window was canceled externally.
*/
boolean isCanceled() {
return isCanceled.get();
}
/**
* Check if the window has received the number of items it demanded.
*
* @return true if demanded number of items are received.
*/
boolean hasReceivedDemanded() {
return pending.get() <= 0;
}
/**
* Check if the window has timeout or there was a failure while scheduling or waiting for timeout.
*
* @see TimeoutReason
*
* @return true if the window has timed out or there was a failure while scheduling or waiting for timeout.
*/
boolean hasTimedOut() {
return timeoutReason.get() != null;
}
/**
* The number of items so far received by the window.
*
* @return the number of items received by the window.
*/
int getPending() {
return pending.get();
}
/**
* The desired number of items to include in the window.
*
* @return the demanded window size.
*/
private long getDemand() {
return demand;
}
/**
* Check if consumer unexpectedly thrown an error while handling an item in the window.
*
* @return true if consumer thrown an error while handling an item.
*/
private boolean hasConsumerError() {
return consumerError.get() != null;
}
/**
* Check if the window is in 'streaming state'.
* <p>
* The window is called 'streaming window' or in 'streaming state', if it has sent at least one item but still
* needs more upstream items to meet its demand.
* </p>
*
* @see WindowedSubscriberOptions
*
* @return true if the window is in 'streaming state', false otherwise.
*/
private boolean isStreaming() {
final int pending = getPending();
return pending > 0 && pending < demand;
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Perform one time initialization of the work to open its window.
* </p>
* @return true if the work is initialized for the first time; false, if it is already initialized.
*/
private boolean init() {
if (isInitialized.getAndSet(true)) {
return false;
}
this.timers.add(beginTimeoutTimer());
this.timers.add(beginNextItemTimeoutTimer());
return true;
}
/**
* Get the flux that streams the window events (items and termination) to it's downstream.
* <p>
* The downstream is the {@link IterableStream} that users uses to consume the window events synchronously.
* </p>
* <p>
* The {@code drainOnCancel} enables registering for a drain loop run when the window termination gets triggered
* from "outside WindowedSubscriber", which will be the case if window flux gets canceled. In all cases
* other than cancel, the window termination (completion, error) is triggered from "within the WindowedSubscriber".
* The WindowedSubscriber needs to control or to be aware of the window termination, so that it can pick work for
* the next window.
* </p>
*
* @param drainOnCancel true if the drain loop needs to be run when the flux is canceled.
* @return the flux streaming window events.
*/
private Flux<T> windowFlux(boolean drainOnCancel) {
final Function<Flux<T>, Flux<T>> decorator = parent.windowDecorator;
final Flux<T> flux = decorator != null ? decorator.apply(sink.asFlux()) : sink.asFlux();
if (drainOnCancel) {
return flux
.doFinally(s -> {
if (s == SignalType.CANCEL) {
isCanceled.set(true);
final WindowWork<T> w = this;
Schedulers.boundedElastic().schedule(() -> parent.postTimedOutOrCanceledWork(w));
}
});
} else {
return flux;
}
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Attempt to deliver the next item to the work's window.
* </p>
* @param item the item to emit.
* @return the result of the emission attempt.
*/
private EmitNextResult tryEmitNext(T item) {
final int c = pending.getAndDecrement();
if (c <= 0) {
if (c < 0) {
withPendingKey(logger.atWarning()).log("Unexpected emit-next attempt when no more demand.");
}
return EmitNextResult.RECEIVED_DEMANDED;
}
final Sinks.EmitResult emitResult;
try {
emitResult = sink.tryEmitNext(item);
} catch (Throwable e) {
consumerError.set(e);
withPendingKey(logger.atError()).log("Unexpected consumer error occurred while emitting.", e);
return EmitNextResult.CONSUMER_ERROR;
}
if (emitResult == Sinks.EmitResult.OK) {
return EmitNextResult.OK;
} else {
withPendingKey(logger.atError()).addKeyValue(EMIT_RESULT_KEY, emitResult)
.log("Could not emit-next.");
return EmitNextResult.SINK_ERROR;
}
}
/**
* CONTRACT: Never invoke from the outside of serialized drain-loop.
* <p>
* Terminate the work to close the window it represents.
* </p>
* @param terminalState the terminal state of the work.
*/
/**
* Starts a timer that upon expiration trigger the window close signal.
*
* @return {@link Disposable} to cancel the timer.
*/
private Disposable beginTimeoutTimer() {
final Disposable disposable = Mono.delay(timeout)
.publishOn(Schedulers.boundedElastic())
.subscribe(__ -> onTimeout(TimeoutReason.TIMEOUT),
e -> onTimeout(TimeoutReason.timeoutErrored(e)));
return disposable;
}
/**
* Start the timer to trigger the window close signal if the next item does not arrive within timeout.
*
* @return {@link Disposable} to cancel the timer.
*/
private Disposable beginNextItemTimeoutTimer() {
final Duration nextItemTimout = parent.nextItemTimout;
if (nextItemTimout == null) {
return Disposables.disposed();
}
final Flux<Mono<Long>> nextItemTimer = sink.asFlux().map(__ -> Mono.delay(nextItemTimout));
final Disposable disposable = Flux.switchOnNext(nextItemTimer)
.publishOn(Schedulers.boundedElastic())
.subscribe(__ -> onTimeout(TimeoutReason.TIMEOUT_NEXT_ITEM),
e -> onTimeout(TimeoutReason.timeoutNextItemErrored(e)));
return disposable;
}
/**
* Signal the window close by timeout.
*
* @param reason the timeout reason.
*/
private void onTimeout(TimeoutReason reason) {
if (timeoutReason.compareAndSet(null, reason)) {
final WindowWork<T> w = this;
parent.postTimedOutOrCanceledWork(w);
}
}
/**
* Assert the condition expected to be met for a given terminal state.
*
* @param condition the condition
* @param terminalState the work terminal state.
*
* @throws IllegalStateException if the condition is not met.
*/
private void assertCondition(boolean condition, WorkTerminalState terminalState) {
if (condition) {
return;
}
final String message = String.format("Illegal invocation of terminate(%s).", terminalState);
throw withPendingKey(logger.atError()).log(new IllegalStateException(message));
}
/**
* CONTRACT: The call site must originate from serialized drain-loop i.e.,
* drainLoop() -> terminate(terminalState) -> closeWindow().
* <p>
* Attempt successful closure of the window.
*/
private void closeWindow() {
sink.emitComplete((signalType, emitResult) -> {
logger.atError()
.addKeyValue(SIGNAL_TYPE_KEY, signalType)
.addKeyValue(EMIT_RESULT_KEY, emitResult)
.log("Could not close window.");
return false;
});
}
/**
* CONTRACT: The call site must originate from serialized drain-loop i.e.,
* drainLoop() -> terminate(terminalState) -> closeWindow(e).
* <p>
* Attempt to close the window with error.
*
* @param e the error to close the window with.
*/
private void closeWindow(Throwable e) {
sink.emitError(e, (signalType, emitResult) -> {
logger.atError()
.addKeyValue(SIGNAL_TYPE_KEY, signalType)
.addKeyValue(EMIT_RESULT_KEY, emitResult)
.log("Could not closed window with error.");
return false;
});
}
/**
* Annotate the logger with current number of items pending to meet the window demand.
*
* @param logger the logger to annotate.
* @return the annotated logger.
*/
private LoggingEventBuilder withPendingKey(LoggingEventBuilder logger) {
return logger.addKeyValue(PENDING_KEY, pending.get());
}
/**
* Gets the logger (used by parent subscriber when initializing this work).
*
* @return the logger.
*/
private LoggingEventBuilder getLogger() {
return withPendingKey(logger.atVerbose());
}
/**
* Creates the logger for the work.
*
* @param parentLogContext the parent logging context.
* @param id the identifier for the work.
* @param demand the demand for the window this work represents.
* @return the logger for the work.
*/
private static ClientLogger createLogger(Map<String, Object> parentLogContext, long id, int demand) {
final Map<String, Object> loggingContext = new HashMap<>(parentLogContext.size() + 5);
loggingContext.putAll(parentLogContext);
loggingContext.put(WORK_ID_KEY, id);
loggingContext.put(DEMAND_KEY, demand);
return new ClientLogger(WindowWork.class, loggingContext);
}
/**
* Creates the sink to signal window events.
* <p>
* Events signaling (items, termination) to this sink will be serialized by the parent's drain-loop,
* <ul>
* <li>items signaling {@link WindowWork
* <li>termination signaling {@link WindowWork
* </ul>
* </p>
* <p>
* There will be two subscribers to this sink,
* <ul>
* <li>subscription for next item timeout.</li>
* <li>subscription from IterableStream.</li>
* </ul>
* to support this multi-subscription broadcasting use case, the sink is replay-able.
* </p>
* <p>
* This sink has an internal unbounded queue acting as a buffer between the drain-loop and the consumer of the
* window. This allows drain-loop to enqueue the window items as fast as it could and move to processing next
* window work. The window size will be the cap for the number of items that gets enqueued.
* </p>
*
* @return the sink to signal window events (items and termination).
* @param <T> type of items in the sink.
*/
private static <T> Sinks.Many<T> createSink() {
return Sinks.many().replay().all();
}
/**
* A type describing a successful completion of timeout or an error while scheduling or waiting for timeout.
*/
private static final class TimeoutReason {
static final TimeoutReason TIMEOUT = new TimeoutReason("Timeout occurred.", null);
static final TimeoutReason TIMEOUT_NEXT_ITEM = new TimeoutReason("Timeout between the messages occurred.", null);
private final String message;
private final Throwable error;
/**
* Create reason describing the error while scheduling or waiting for timeout.
*
* @param error indicates anything internal to Reactor failing timeout attempt. E.g., error in scheduling
* timeout task when internal Scheduler throws {@link RejectedExecutionException}.
* @return the reason.
*/
static TimeoutReason timeoutErrored(Throwable error) {
return new TimeoutReason("Error while scheduling or waiting for timeout.", error);
}
/**
* Create reason describing the error while scheduling or waiting for timeout between items.
*
* @param error indicates anything internal to Reactor failing timeout attempt. E.g., error in scheduling
* timeout task when internal Scheduler throws {@link RejectedExecutionException}.
* @return the reason.
*/
static TimeoutReason timeoutNextItemErrored(Throwable error) {
return new TimeoutReason("Error while scheduling or waiting for timeout between the messages.", error);
}
private TimeoutReason(String message, Throwable error) {
this.message = message;
this.error = error;
}
String getMessage() {
return message;
}
Throwable getError() {
return error;
}
}
} |
For tests that requires clientId, one would need to set it in env. Though currently not sure which to set. | protected String clientIdFromFile() {
String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
return testResourceNamer.recordValueFromConfig(clientId);
} | String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID); | protected String clientIdFromFile() {
String clientId = Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_CLIENT_ID);
return testResourceNamer.recordValueFromConfig(clientId);
} | class ResourceManagerTestProxyTestBase extends TestProxyTestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "https:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
@Override
public void write(byte[] b) {
}
@Override
public void write(byte[] b, int off, int len) {
}
};
/**
* Redacted value.
*/
protected static final String REDACTED_VALUE = "REDACTED";
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);
private AzureProfile testProfile;
private boolean isSkipInPlayback;
private final List<TestProxySanitizer> sanitizers = new ArrayList<>();
/**
* Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
* bound.
*/
@RegisterExtension
final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60));
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
/**
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
httpPipeline = buildHttpPipeline(
request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)),
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
if (!testContextManager.doNotRecordTest()) {
interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version"))));
addSanitizers();
}
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String tenantId = Objects.requireNonNull(
configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID),
"'AZURE_TENANT_ID' environment variable cannot be null.");
String subscriptionId = Objects.requireNonNull(
configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID),
"'AZURE_SUBSCRIPTION_ID' environment variable cannot be null.");
credential = new DefaultAzureCredentialBuilder()
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
policies.add(this.interceptorManager.getRecordPolicy());
addSanitizers();
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
}
initializeClients(httpPipeline, testProfile);
}
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException ignored) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
}
/**
* Adds test proxy sanitizers.
* <p>
* Recommend to call this API in subclass constructor.
*
* @param sanitizers the test proxy sanitizers.
*/
protected void addSanitizers(TestProxySanitizer... sanitizers) {
this.sanitizers.addAll(Arrays.asList(sanitizers));
}
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {
private final Duration duration;
private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
Objects.requireNonNull(timeoutSupplier);
this.duration = timeoutSupplier.get();
}
@Override
public void interceptTestMethod(Invocation<Void> invocation,
ReflectiveInvocationContext<Method> invocationContext,
ExtensionContext extensionContext) throws Throwable {
if (isPlaybackMode()) {
Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
} else {
invocation.proceed();
}
}
}
} | class ResourceManagerTestProxyTestBase extends TestProxyTestBase {
private static final String ZERO_UUID = "00000000-0000-0000-0000-000000000000";
private static final String ZERO_SUBSCRIPTION = ZERO_UUID;
private static final String ZERO_TENANT = ZERO_UUID;
private static final String PLAYBACK_URI_BASE = "https:
private static final String AZURE_AUTH_LOCATION = "AZURE_AUTH_LOCATION";
private static final String AZURE_TEST_LOG_LEVEL = "AZURE_TEST_LOG_LEVEL";
private static final String HTTPS_PROXY_HOST = "https.proxyHost";
private static final String HTTPS_PROXY_PORT = "https.proxyPort";
private static final String HTTP_PROXY_HOST = "http.proxyHost";
private static final String HTTP_PROXY_PORT = "http.proxyPort";
private static final String USE_SYSTEM_PROXY = "java.net.useSystemProxies";
private static final String VALUE_TRUE = "true";
private static final String PLAYBACK_URI = PLAYBACK_URI_BASE + "1234";
private static final AzureProfile PLAYBACK_PROFILE = new AzureProfile(
ZERO_TENANT,
ZERO_SUBSCRIPTION,
new AzureEnvironment(Arrays.stream(AzureEnvironment.Endpoint.values())
.collect(Collectors.toMap(AzureEnvironment.Endpoint::identifier, endpoint -> PLAYBACK_URI)))
);
private static final OutputStream EMPTY_OUTPUT_STREAM = new OutputStream() {
@Override
public void write(int b) {
}
@Override
public void write(byte[] b) {
}
@Override
public void write(byte[] b, int off, int len) {
}
};
/**
* Redacted value.
*/
protected static final String REDACTED_VALUE = "REDACTED";
private static final ClientLogger LOGGER = new ClientLogger(ResourceManagerTestProxyTestBase.class);
private AzureProfile testProfile;
private boolean isSkipInPlayback;
private final List<TestProxySanitizer> sanitizers = new ArrayList<>();
/**
* Sets upper bound execution timeout for each @Test method.
* {@link org.junit.jupiter.api.Timeout} annotation on test methods will only narrow the timeout, not affecting the upper
* bound.
*/
@RegisterExtension
final PlaybackTimeoutInterceptor playbackTimeoutInterceptor = new PlaybackTimeoutInterceptor(() -> Duration.ofSeconds(60));
/**
* Generates a random resource name.
*
* @param prefix Prefix for the resource name.
* @param maxLen Maximum length of the resource name.
* @return A randomly generated resource name with a given prefix and maximum length.
*/
protected String generateRandomResourceName(String prefix, int maxLen) {
return testResourceNamer.randomName(prefix, maxLen);
}
/**
* @return A randomly generated UUID.
*/
protected String generateRandomUuid() {
return testResourceNamer.randomUuid();
}
/**
* @return random password
*/
public static String password() {
String password = new ResourceNamer("").randomName("Pa5$", 12);
LOGGER.info("Password: {}", password);
return password;
}
private static String sshPublicKey;
/**
* @return an SSH public key
*/
public static String sshPublicKey() {
if (sshPublicKey == null) {
try {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
keyGen.initialize(1024);
KeyPair pair = keyGen.generateKeyPair();
PublicKey publicKey = pair.getPublic();
RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey;
ByteArrayOutputStream byteOs = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(byteOs);
dos.writeInt("ssh-rsa".getBytes(StandardCharsets.US_ASCII).length);
dos.write("ssh-rsa".getBytes(StandardCharsets.US_ASCII));
dos.writeInt(rsaPublicKey.getPublicExponent().toByteArray().length);
dos.write(rsaPublicKey.getPublicExponent().toByteArray());
dos.writeInt(rsaPublicKey.getModulus().toByteArray().length);
dos.write(rsaPublicKey.getModulus().toByteArray());
String publicKeyEncoded = new String(Base64.getEncoder().encode(byteOs.toByteArray()), StandardCharsets.US_ASCII);
sshPublicKey = "ssh-rsa " + publicKeyEncoded;
} catch (NoSuchAlgorithmException | IOException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException("failed to generate ssh key", e));
}
}
return sshPublicKey;
}
/**
* Loads a client ID from file.
*
* @return A client ID loaded from a file.
*/
/**
* @return The test profile.
*/
protected AzureProfile profile() {
return testProfile;
}
/**
* @return Whether the test mode is {@link TestMode
*/
protected boolean isPlaybackMode() {
return getTestMode() == TestMode.PLAYBACK;
}
/**
* @return Whether the test should be skipped in playback.
*/
protected boolean skipInPlayback() {
if (isPlaybackMode()) {
isSkipInPlayback = true;
}
return isSkipInPlayback;
}
@Override
protected void beforeTest() {
TokenCredential credential;
HttpPipeline httpPipeline;
String logLevel = Configuration.getGlobalConfiguration().get(AZURE_TEST_LOG_LEVEL);
HttpLogDetailLevel httpLogDetailLevel;
try {
httpLogDetailLevel = HttpLogDetailLevel.valueOf(logLevel);
} catch (Exception e) {
if (isPlaybackMode()) {
httpLogDetailLevel = HttpLogDetailLevel.NONE;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'NONE' for PLAYBACK.", AZURE_TEST_LOG_LEVEL);
} else {
httpLogDetailLevel = HttpLogDetailLevel.BODY_AND_HEADERS;
LOGGER.error("Environment variable '{}' has not been set yet. Using 'BODY_AND_HEADERS' for RECORD/LIVE.", AZURE_TEST_LOG_LEVEL);
}
}
if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
try {
System.setOut(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
System.setErr(new PrintStream(EMPTY_OUTPUT_STREAM, false, Charset.defaultCharset().name()));
} catch (UnsupportedEncodingException e) {
}
}
if (isPlaybackMode()) {
testProfile = PLAYBACK_PROFILE;
List<HttpPipelinePolicy> policies = new ArrayList<>();
httpPipeline = buildHttpPipeline(
request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)),
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
interceptorManager.getPlaybackClient());
if (!testContextManager.doNotRecordTest()) {
interceptorManager.addMatchers(Collections.singletonList(new CustomMatcher().setIgnoredQueryParameters(Arrays.asList("api-version"))));
addSanitizers();
}
} else {
Configuration configuration = Configuration.getGlobalConfiguration();
String tenantId = Objects.requireNonNull(
configuration.get(Configuration.PROPERTY_AZURE_TENANT_ID),
"'AZURE_TENANT_ID' environment variable cannot be null.");
String subscriptionId = Objects.requireNonNull(
configuration.get(Configuration.PROPERTY_AZURE_SUBSCRIPTION_ID),
"'AZURE_SUBSCRIPTION_ID' environment variable cannot be null.");
credential = new DefaultAzureCredentialBuilder()
.authorityHost(AzureEnvironment.AZURE.getActiveDirectoryEndpoint())
.build();
testProfile = new AzureProfile(tenantId, subscriptionId, AzureEnvironment.AZURE);
List<HttpPipelinePolicy> policies = new ArrayList<>();
if (interceptorManager.isRecordMode() && !testContextManager.doNotRecordTest()) {
policies.add(this.interceptorManager.getRecordPolicy());
addSanitizers();
}
if (httpLogDetailLevel == HttpLogDetailLevel.BODY_AND_HEADERS) {
policies.add(new HttpDebugLoggingPolicy());
httpLogDetailLevel = HttpLogDetailLevel.NONE;
}
httpPipeline = buildHttpPipeline(
credential,
testProfile,
new HttpLogOptions().setLogLevel(httpLogDetailLevel),
policies,
generateHttpClientWithProxy(null, null));
}
initializeClients(httpPipeline, testProfile);
}
/**
* Generates an {@link HttpClient} with a proxy.
*
* @param clientBuilder The HttpClient builder.
* @param proxyOptions The proxy.
* @return An HttpClient with a proxy.
*/
protected HttpClient generateHttpClientWithProxy(NettyAsyncHttpClientBuilder clientBuilder, ProxyOptions proxyOptions) {
if (clientBuilder == null) {
clientBuilder = new NettyAsyncHttpClientBuilder();
}
if (proxyOptions != null) {
clientBuilder.proxy(proxyOptions);
} else {
try {
System.setProperty(USE_SYSTEM_PROXY, VALUE_TRUE);
List<Proxy> proxies = ProxySelector.getDefault().select(new URI(AzureEnvironment.AZURE.getResourceManagerEndpoint()));
if (!proxies.isEmpty()) {
for (Proxy proxy : proxies) {
if (proxy.address() instanceof InetSocketAddress) {
String host = ((InetSocketAddress) proxy.address()).getHostName();
int port = ((InetSocketAddress) proxy.address()).getPort();
switch (proxy.type()) {
case HTTP:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port))).build();
case SOCKS:
return clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.SOCKS5, new InetSocketAddress(host, port))).build();
default:
}
}
}
}
String host = null;
int port = 0;
if (System.getProperty(HTTPS_PROXY_HOST) != null && System.getProperty(HTTPS_PROXY_PORT) != null) {
host = System.getProperty(HTTPS_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTPS_PROXY_PORT));
} else if (System.getProperty(HTTP_PROXY_HOST) != null && System.getProperty(HTTP_PROXY_PORT) != null) {
host = System.getProperty(HTTP_PROXY_HOST);
port = Integer.parseInt(System.getProperty(HTTP_PROXY_PORT));
}
if (host != null) {
clientBuilder.proxy(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress(host, port)));
}
} catch (URISyntaxException ignored) { }
}
return clientBuilder.build();
}
@Override
protected void afterTest() {
if (!isSkipInPlayback) {
cleanUpResources();
}
}
/**
* Sets sdk context when running the tests
*
* @param internalContext the internal runtime context
* @param objects the manager classes to change internal context
* @param <T> the type of internal context
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> void setInternalContext(T internalContext, Object... objects) {
try {
for (Object obj : objects) {
for (final Field field : obj.getClass().getSuperclass().getDeclaredFields()) {
if (field.getName().equals("resourceManager")) {
setAccessible(field);
Field context = field.get(obj).getClass().getDeclaredField("internalContext");
setAccessible(context);
context.set(field.get(obj), internalContext);
}
}
for (Field field : obj.getClass().getDeclaredFields()) {
if (field.getName().equals("internalContext")) {
setAccessible(field);
field.set(obj, internalContext);
} else if (field.getName().contains("Manager")) {
setAccessible(field);
setInternalContext(internalContext, field.get(obj));
}
}
}
} catch (IllegalAccessException | NoSuchFieldException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
private void setAccessible(final AccessibleObject accessibleObject) {
Runnable runnable = () -> accessibleObject.setAccessible(true);
runnable.run();
}
/**
* Builds the manager with provided http pipeline and profile in general manner.
*
* @param manager the class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
/**
* Builds an HttpPipeline.
*
* @param credential The credentials to use in the pipeline.
* @param profile The AzureProfile to use in the pipeline.
* @param httpLogOptions The HTTP logging options to use in the pipeline.
* @param policies Additional policies to use in the pipeline.
* @param httpClient The HttpClient to use in the pipeline.
* @return A new constructed HttpPipeline.
*/
protected abstract HttpPipeline buildHttpPipeline(
TokenCredential credential,
AzureProfile profile,
HttpLogOptions httpLogOptions,
List<HttpPipelinePolicy> policies,
HttpClient httpClient);
/**
* Initializes service clients used in testing.
*
* @param httpPipeline The HttpPipeline to use in the clients.
* @param profile The AzureProfile to use in the clients.
*/
protected abstract void initializeClients(HttpPipeline httpPipeline, AzureProfile profile);
/**
* Cleans up resources.
*/
protected abstract void cleanUpResources();
private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
}
/**
* Adds test proxy sanitizers.
* <p>
* Recommend to call this API in subclass constructor.
*
* @param sanitizers the test proxy sanitizers.
*/
protected void addSanitizers(TestProxySanitizer... sanitizers) {
this.sanitizers.addAll(Arrays.asList(sanitizers));
}
private final class PlaybackTimeoutInterceptor implements InvocationInterceptor {
private final Duration duration;
private PlaybackTimeoutInterceptor(Supplier<Duration> timeoutSupplier) {
Objects.requireNonNull(timeoutSupplier);
this.duration = timeoutSupplier.get();
}
@Override
public void interceptTestMethod(Invocation<Void> invocation,
ReflectiveInvocationContext<Method> invocationContext,
ExtensionContext extensionContext) throws Throwable {
if (isPlaybackMode()) {
Assertions.assertTimeoutPreemptively(duration, invocation::proceed);
} else {
invocation.proceed();
}
}
}
} |
Wouldn't `Objects.equals(SENTINEL, null)` return false? | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | if (!Objects.equals(SENTINEL, data)) { | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} |
Should we consider having `containsKey` method in InternalContext? | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | data = contexts[i].get(key); | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} |
Correct, this is to allow `null` values to be set in Context and returned properly. If we had - key: foo \ - key2: bar \ - key: null We'd want null to be returned as it is last in the chain | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | if (!Objects.equals(SENTINEL, data)) { | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} |
We could, or we could have InternalContext use Optional whereas the public Context doesn't. I chose to go with the sentinel value as it is the cheapest computation wise. | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | data = contexts[i].get(key); | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} |
I'm ok with this. | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | data = contexts[i].get(key); | public Object get(Object key) {
Object data = SENTINEL;
for (int i = contexts.length - 1; i >= 0; i--) {
data = contexts[i].get(key);
if (!Objects.equals(SENTINEL, data)) {
return data;
}
}
return data;
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} | class InternalContextN implements InternalContext {
private static final Object SENTINEL = new Object();
private final InternalContext[] contexts;
private final int count;
private final Object key;
private final Object value;
InternalContextN(InternalContext... contexts) {
this.contexts = contexts;
int count = 0;
for (InternalContext context : contexts) {
count += context.size();
}
this.count = count;
this.key = contexts[contexts.length - 1].getKey();
this.value = contexts[contexts.length - 1].getValue();
}
private InternalContextN(int count, Object key, Object value, InternalContext[] contexts) {
this.count = count;
this.key = key;
this.value = value;
this.contexts = contexts;
}
@Override
public Object getKey() {
return key;
}
@Override
public Object getValue() {
return value;
}
@Override
public int size() {
return count;
}
@Override
public InternalContext put(Object key, Object value) {
InternalContext last = contexts[contexts.length - 1];
if (last.size() < 4) {
InternalContext[] newContexts = Arrays.copyOf(contexts, contexts.length);
newContexts[contexts.length - 1] = last.put(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
} else {
InternalContext[] newContexts = new InternalContext[contexts.length + 1];
System.arraycopy(contexts, 0, newContexts, 0, contexts.length);
newContexts[contexts.length] = new InternalContext1(key, value);
return new InternalContextN(count + 1, key, value, newContexts);
}
}
@Override
} |
Ah - we have both xml and json support in json-jackson package. Package name is misleading. | public byte[] serializeToBytes(Object object, SerializerEncoding encoding) throws IOException {
if (object == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
return supportsXmlSerializable(object.getClass())
? serializeXmlSerializableToBytes((XmlSerializable<?>) object)
: getXmlMapper().writeValueAsBytes(object);
} else if (encoding == SerializerEncoding.TEXT) {
return object.toString().getBytes(StandardCharsets.UTF_8);
} else {
return supportsJsonSerializable(object.getClass())
? serializeJsonSerializableToBytes((JsonSerializable<?>) object)
: mapper.writeValueAsBytes(object);
}
} | ? serializeXmlSerializableToBytes((XmlSerializable<?>) object) | public byte[] serializeToBytes(Object object, SerializerEncoding encoding) throws IOException {
if (object == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
return supportsXmlSerializable(object.getClass())
? serializeXmlSerializableToBytes((XmlSerializable<?>) object)
: getXmlMapper().writeValueAsBytes(object);
} else if (encoding == SerializerEncoding.TEXT) {
return object.toString().getBytes(StandardCharsets.UTF_8);
} else {
return supportsJsonSerializable(object.getClass())
? serializeJsonSerializableToBytes((JsonSerializable<?>) object)
: mapper.writeValueAsBytes(object);
}
} | class JacksonAdapter implements SerializerAdapter {
private static final ClientLogger LOGGER = new ClientLogger(JacksonAdapter.class);
// Enum-based singleton holder: the XML mapper is created lazily when the enum
// class is first loaded, i.e. only if XML (de)serialization is actually used.
private enum GlobalXmlMapper {
XML_MAPPER(ObjectMapperShim.createXmlMapper());
private final ObjectMapperShim xmlMapper;
GlobalXmlMapper(ObjectMapperShim xmlMapper) {
this.xmlMapper = xmlMapper;
}
private ObjectMapperShim getXmlMapper() {
return xmlMapper;
}
}
// Enum-based singleton holder for the process-wide default SerializerAdapter,
// initialized lazily on first use of defaultSerializerAdapter().
private enum GlobalSerializerAdapter {
SERIALIZER_ADAPTER(new JacksonAdapter());
private final SerializerAdapter serializerAdapter;
GlobalSerializerAdapter(SerializerAdapter serializerAdapter) {
this.serializerAdapter = serializerAdapter;
}
private SerializerAdapter getSerializerAdapter() {
return serializerAdapter;
}
}
/**
* An instance of {@link ObjectMapperShim} to serialize/deserialize objects.
*/
private final ObjectMapperShim mapper;
private final ObjectMapperShim headerMapper;
// Private: instances are obtained through defaultSerializerAdapter().
// Creates the general-purpose JSON mapper and the dedicated header mapper.
private JacksonAdapter() {
this.headerMapper = ObjectMapperShim.createHeaderMapper();
this.mapper = ObjectMapperShim.createJsonMapper(ObjectMapperShim.createSimpleMapper());
}
/**
* maintain singleton instance of the default serializer adapter.
*
* @return the default serializer
*/
public static SerializerAdapter defaultSerializerAdapter() {
return GlobalSerializerAdapter.SERIALIZER_ADAPTER.getSerializerAdapter();
}
@Override
public String serialize(Object object, SerializerEncoding encoding) throws IOException {
if (object == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
// Prefer the stream-style XmlSerializable contract when the type implements it,
// falling back to Jackson's XML mapper otherwise.
return supportsXmlSerializable(object.getClass())
? serializeXmlSerializableToString((XmlSerializable<?>) object)
: getXmlMapper().writeValueAsString(object);
} else if (encoding == SerializerEncoding.TEXT) {
// TEXT encoding is the object's own string form, no mapper involved.
return object.toString();
} else {
// Default branch covers JSON and any other encodings.
return supportsJsonSerializable(object.getClass())
? serializeJsonSerializableToString((JsonSerializable<?>) object)
: mapper.writeValueAsString(object);
}
}
@Override
@Override
public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException {
// Serializes 'object' directly into 'outputStream'; a null object writes nothing.
// The stream is intentionally left open for the caller to manage.
if (object == null) {
return;
}
if (encoding == SerializerEncoding.XML) {
if (supportsXmlSerializable(object.getClass())) {
serializeXmlSerializableIntoOutputStream((XmlSerializable<?>) object, outputStream);
} else {
getXmlMapper().writeValue(outputStream, object);
}
} else if (encoding == SerializerEncoding.TEXT) {
// TEXT is the object's string form encoded as UTF-8 bytes.
outputStream.write(object.toString().getBytes(StandardCharsets.UTF_8));
} else {
if (supportsJsonSerializable(object.getClass())) {
serializeJsonSerializableIntoOutputStream((JsonSerializable<?>) object, outputStream);
} else {
mapper.writeValue(outputStream, object);
}
}
}
@Override
public String serializeRaw(Object object) {
if (object == null) {
return null;
}
try {
// Serialize as JSON, then strip surrounding quotes so simple values
// (e.g. strings) come back without JSON quoting.
return removeLeadingAndTrailingQuotes(serialize(object, SerializerEncoding.JSON));
} catch (IOException ex) {
// Surface serialization failures as unchecked while preserving the cause.
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/*
 * Used by 'serializeRaw' to strip every leading and trailing double-quote (")
 * character from the given string. A string consisting solely of quotes (or an
 * empty string) yields the empty string; interior quotes are preserved.
 */
static String removeLeadingAndTrailingQuotes(String str) {
    int begin = 0;
    int end = str.length();

    // Advance past leading quotes.
    while (begin < end && str.charAt(begin) == '"') {
        begin++;
    }

    // Retreat past trailing quotes, never crossing 'begin'.
    while (end > begin && str.charAt(end - 1) == '"') {
        end--;
    }

    return str.substring(begin, end);
}
@Override
public String serializeList(List<?> list, CollectionFormat format) {
// Delegates to the shared Iterable-based implementation.
return serializeIterable(list, format);
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException {
// Null/empty input deserializes to null rather than failing.
if (CoreUtils.isNullOrEmpty(value)) {
return null;
}
if (encoding == SerializerEncoding.XML) {
// Prefer the stream-style XmlSerializable contract when available.
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsXmlSerializable(rawClass)
? (T) deserializeAsXmlSerializable(rawClass, value.getBytes(StandardCharsets.UTF_8))
: getXmlMapper().readValue(value, type);
} else if (encoding == SerializerEncoding.TEXT) {
return (T) deserializeText(value, type);
} else {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsJsonSerializable(rawClass)
? (T) deserializeAsJsonSerializable(rawClass, value.getBytes(StandardCharsets.UTF_8))
: mapper.readValue(value, type);
}
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
// Null/empty input deserializes to null rather than failing.
if (bytes == null || bytes.length == 0) {
return null;
}
if (encoding == SerializerEncoding.XML) {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsXmlSerializable(rawClass)
? (T) deserializeAsXmlSerializable(rawClass, bytes)
: getXmlMapper().readValue(bytes, type);
} else if (encoding == SerializerEncoding.TEXT) {
// Decode with BOM awareness before parsing the textual value.
return (T) deserializeText(CoreUtils.bomAwareToString(bytes, null), type);
} else {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsJsonSerializable(rawClass)
? (T) deserializeAsJsonSerializable(rawClass, bytes)
: mapper.readValue(bytes, type);
}
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException {
    // Deserializes the stream's content as 'type' using the given encoding.
    // A null stream deserializes to null rather than failing.
    if (inputStream == null) {
        return null;
    }
    if (encoding == SerializerEncoding.XML) {
        // Prefer the stream-style XmlSerializable contract when available.
        Class<?> rawClass = TypeUtil.getRawClass(type);
        return supportsXmlSerializable(rawClass)
            ? (T) deserializeAsXmlSerializable(rawClass, inputStreamToBytes(inputStream))
            : getXmlMapper().readValue(inputStream, type);
    } else if (encoding == SerializerEncoding.TEXT) {
        // Reuse the shared drain helper plus BOM-aware decoding, mirroring the
        // byte[] overload instead of duplicating the read loop inline.
        return (T) deserializeText(CoreUtils.bomAwareToString(inputStreamToBytes(inputStream), null), type);
    } else {
        Class<?> rawClass = TypeUtil.getRawClass(type);
        return supportsJsonSerializable(rawClass)
            ? (T) deserializeAsJsonSerializable(rawClass, inputStreamToBytes(inputStream))
            : mapper.readValue(inputStream, type);
    }
}
/*
 * Fully drains 'inputStream' into memory and returns the collected bytes.
 * The stream is read in 8 KB chunks and is not closed by this method.
 */
private static byte[] inputStreamToBytes(InputStream inputStream) throws IOException {
    AccessibleByteArrayOutputStream collector = new AccessibleByteArrayOutputStream();
    byte[] chunk = new byte[8192];

    for (int read = inputStream.read(chunk); read != -1; read = inputStream.read(chunk)) {
        collector.write(chunk, 0, read);
    }

    return collector.toByteArray();
}
// Converts a plain-text payload into the requested 'type'. Supports strings,
// primitives and their boxes, common date/time and identifier types, enums and
// ExpandableStringEnum subclasses. Throws IllegalStateException for anything else.
@SuppressWarnings({ "unchecked", "rawtypes" })
private static Object deserializeText(String value, Type type) throws IOException {
if (type == String.class || type == CharSequence.class) {
return value;
} else if (type == int.class || type == Integer.class) {
return Integer.parseInt(value);
} else if (type == char.class || type == Character.class) {
// Empty text maps to null for char-like targets.
return CoreUtils.isNullOrEmpty(value) ? null : value.charAt(0);
} else if (type == byte.class || type == Byte.class) {
// NOTE(review): this narrows the FIRST CHARACTER's code unit to a byte rather
// than parsing the text as a number - confirm this is the intended contract.
return CoreUtils.isNullOrEmpty(value) ? null : (byte) value.charAt(0);
} else if (type == byte[].class) {
return CoreUtils.isNullOrEmpty(value) ? null : value.getBytes(StandardCharsets.UTF_8);
} else if (type == long.class || type == Long.class) {
return Long.parseLong(value);
} else if (type == short.class || type == Short.class) {
return Short.parseShort(value);
} else if (type == float.class || type == Float.class) {
return Float.parseFloat(value);
} else if (type == double.class || type == Double.class) {
return Double.parseDouble(value);
} else if (type == boolean.class || type == Boolean.class) {
return Boolean.parseBoolean(value);
} else if (type == OffsetDateTime.class) {
return OffsetDateTime.parse(value);
} else if (type == DateTimeRfc1123.class) {
return new DateTimeRfc1123(value);
} else if (type == URL.class) {
try {
return ImplUtils.createUrl(value);
} catch (MalformedURLException ex) {
// Normalize to IOException so callers handle one exception type.
throw new IOException(ex);
}
} else if (type == URI.class) {
return URI.create(value);
} else if (type == UUID.class) {
return UUID.fromString(value);
} else if (type == LocalDate.class) {
return LocalDate.parse(value);
} else if (Enum.class.isAssignableFrom((Class<?>) type)) {
return Enum.valueOf((Class) type, value);
} else if (ExpandableStringEnum.class.isAssignableFrom((Class<?>) type)) {
// ExpandableStringEnum subclasses expose a static fromString(String) factory.
try {
return ((Class<?>) type).getDeclaredMethod("fromString", String.class).invoke(null, value);
} catch (ReflectiveOperationException ex) {
throw new IOException(ex);
}
} else {
throw new IllegalStateException("Unsupported text Content-Type Type: " + type);
}
}
@Override
public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException {
// Header deserialization goes through the dedicated header mapper.
return headerMapper.deserialize(headers, deserializedHeadersType);
}
@Override
public <T> T deserializeHeader(Header header, Type type) throws IOException {
// Parses a single header's string value as 'type'.
return headerMapper.readValue(header.getValue(), type);
}
// Returns the lazily initialized, process-wide XML mapper.
private ObjectMapperShim getXmlMapper() {
return GlobalXmlMapper.XML_MAPPER.getXmlMapper();
}
} | class JacksonAdapter implements SerializerAdapter {
private static final ClientLogger LOGGER = new ClientLogger(JacksonAdapter.class);
private enum GlobalXmlMapper {
XML_MAPPER(ObjectMapperShim.createXmlMapper());
private final ObjectMapperShim xmlMapper;
GlobalXmlMapper(ObjectMapperShim xmlMapper) {
this.xmlMapper = xmlMapper;
}
private ObjectMapperShim getXmlMapper() {
return xmlMapper;
}
}
private enum GlobalSerializerAdapter {
SERIALIZER_ADAPTER(new JacksonAdapter());
private final SerializerAdapter serializerAdapter;
GlobalSerializerAdapter(SerializerAdapter serializerAdapter) {
this.serializerAdapter = serializerAdapter;
}
private SerializerAdapter getSerializerAdapter() {
return serializerAdapter;
}
}
/**
* An instance of {@link ObjectMapperShim} to serialize/deserialize objects.
*/
private final ObjectMapperShim mapper;
private final ObjectMapperShim headerMapper;
private JacksonAdapter() {
this.headerMapper = ObjectMapperShim.createHeaderMapper();
this.mapper = ObjectMapperShim.createJsonMapper(ObjectMapperShim.createSimpleMapper());
}
/**
* maintain singleton instance of the default serializer adapter.
*
* @return the default serializer
*/
public static SerializerAdapter defaultSerializerAdapter() {
return GlobalSerializerAdapter.SERIALIZER_ADAPTER.getSerializerAdapter();
}
@Override
public String serialize(Object object, SerializerEncoding encoding) throws IOException {
if (object == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
return supportsXmlSerializable(object.getClass())
? serializeXmlSerializableToString((XmlSerializable<?>) object)
: getXmlMapper().writeValueAsString(object);
} else if (encoding == SerializerEncoding.TEXT) {
return object.toString();
} else {
return supportsJsonSerializable(object.getClass())
? serializeJsonSerializableToString((JsonSerializable<?>) object)
: mapper.writeValueAsString(object);
}
}
@Override
@Override
public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException {
if (object == null) {
return;
}
if (encoding == SerializerEncoding.XML) {
if (supportsXmlSerializable(object.getClass())) {
serializeXmlSerializableIntoOutputStream((XmlSerializable<?>) object, outputStream);
} else {
getXmlMapper().writeValue(outputStream, object);
}
} else if (encoding == SerializerEncoding.TEXT) {
outputStream.write(object.toString().getBytes(StandardCharsets.UTF_8));
} else {
if (supportsJsonSerializable(object.getClass())) {
serializeJsonSerializableIntoOutputStream((JsonSerializable<?>) object, outputStream);
} else {
mapper.writeValue(outputStream, object);
}
}
}
@Override
public String serializeRaw(Object object) {
if (object == null) {
return null;
}
try {
return removeLeadingAndTrailingQuotes(serialize(object, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/*
* Used by 'serializeRaw' to removal all leading and trailing quotes (").
*/
static String removeLeadingAndTrailingQuotes(String str) {
int strLength = str.length();
int startOffset = 0;
while (startOffset < strLength) {
if (str.charAt(startOffset) != '"') {
break;
}
startOffset++;
}
if (startOffset == strLength) {
return "";
}
int endOffset = strLength - 1;
while (endOffset >= 0) {
if (str.charAt(endOffset) != '"') {
break;
}
endOffset--;
}
return str.substring(startOffset, endOffset + 1);
}
@Override
public String serializeList(List<?> list, CollectionFormat format) {
return serializeIterable(list, format);
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException {
if (CoreUtils.isNullOrEmpty(value)) {
return null;
}
if (encoding == SerializerEncoding.XML) {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsXmlSerializable(rawClass)
? (T) deserializeAsXmlSerializable(rawClass, value.getBytes(StandardCharsets.UTF_8))
: getXmlMapper().readValue(value, type);
} else if (encoding == SerializerEncoding.TEXT) {
return (T) deserializeText(value, type);
} else {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsJsonSerializable(rawClass)
? (T) deserializeAsJsonSerializable(rawClass, value.getBytes(StandardCharsets.UTF_8))
: mapper.readValue(value, type);
}
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
if (bytes == null || bytes.length == 0) {
return null;
}
if (encoding == SerializerEncoding.XML) {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsXmlSerializable(rawClass)
? (T) deserializeAsXmlSerializable(rawClass, bytes)
: getXmlMapper().readValue(bytes, type);
} else if (encoding == SerializerEncoding.TEXT) {
return (T) deserializeText(CoreUtils.bomAwareToString(bytes, null), type);
} else {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsJsonSerializable(rawClass)
? (T) deserializeAsJsonSerializable(rawClass, bytes)
: mapper.readValue(bytes, type);
}
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException {
if (inputStream == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsXmlSerializable(rawClass)
? (T) deserializeAsXmlSerializable(rawClass, inputStreamToBytes(inputStream))
: getXmlMapper().readValue(inputStream, type);
} else if (encoding == SerializerEncoding.TEXT) {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
byte[] buffer = new byte[8192];
int readCount;
while ((readCount = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, readCount);
}
return (T) deserializeText(outputStream.bomAwareToString(null), type);
} else {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsJsonSerializable(rawClass)
? (T) deserializeAsJsonSerializable(rawClass, inputStreamToBytes(inputStream))
: mapper.readValue(inputStream, type);
}
}
private static byte[] inputStreamToBytes(InputStream inputStream) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
byte[] buffer = new byte[8192];
int readCount;
while ((readCount = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, readCount);
}
return outputStream.toByteArray();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
private static Object deserializeText(String value, Type type) throws IOException {
if (type == String.class || type == CharSequence.class) {
return value;
} else if (type == int.class || type == Integer.class) {
return Integer.parseInt(value);
} else if (type == char.class || type == Character.class) {
return CoreUtils.isNullOrEmpty(value) ? null : value.charAt(0);
} else if (type == byte.class || type == Byte.class) {
return CoreUtils.isNullOrEmpty(value) ? null : (byte) value.charAt(0);
} else if (type == byte[].class) {
return CoreUtils.isNullOrEmpty(value) ? null : value.getBytes(StandardCharsets.UTF_8);
} else if (type == long.class || type == Long.class) {
return Long.parseLong(value);
} else if (type == short.class || type == Short.class) {
return Short.parseShort(value);
} else if (type == float.class || type == Float.class) {
return Float.parseFloat(value);
} else if (type == double.class || type == Double.class) {
return Double.parseDouble(value);
} else if (type == boolean.class || type == Boolean.class) {
return Boolean.parseBoolean(value);
} else if (type == OffsetDateTime.class) {
return OffsetDateTime.parse(value);
} else if (type == DateTimeRfc1123.class) {
return new DateTimeRfc1123(value);
} else if (type == URL.class) {
try {
return ImplUtils.createUrl(value);
} catch (MalformedURLException ex) {
throw new IOException(ex);
}
} else if (type == URI.class) {
return URI.create(value);
} else if (type == UUID.class) {
return UUID.fromString(value);
} else if (type == LocalDate.class) {
return LocalDate.parse(value);
} else if (Enum.class.isAssignableFrom((Class<?>) type)) {
return Enum.valueOf((Class) type, value);
} else if (ExpandableStringEnum.class.isAssignableFrom((Class<?>) type)) {
try {
return ((Class<?>) type).getDeclaredMethod("fromString", String.class).invoke(null, value);
} catch (ReflectiveOperationException ex) {
throw new IOException(ex);
}
} else {
throw new IllegalStateException("Unsupported text Content-Type Type: " + type);
}
}
@Override
public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException {
return headerMapper.deserialize(headers, deserializedHeadersType);
}
@Override
public <T> T deserializeHeader(Header header, Type type) throws IOException {
return headerMapper.readValue(header.getValue(), type);
}
private ObjectMapperShim getXmlMapper() {
return GlobalXmlMapper.XML_MAPPER.getXmlMapper();
}
} |
Isn't it sufficient to check whether the class is implementing XmlSerializable? Do we need to check if fromXml and toXml methods are present? | public static boolean supportsXmlSerializable(Class<?> bodyContentClass) {
if (FROM_XML_CACHE.containsKey(bodyContentClass)) {
return true;
}
if (!XmlSerializable.class.isAssignableFrom(bodyContentClass)) {
return false;
}
boolean hasFromXml = false;
boolean hasToXml = false;
for (Method method : bodyContentClass.getDeclaredMethods()) {
if (method.getName().equals("fromXml")
&& (method.getModifiers() & Modifier.STATIC) != 0
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(XmlReader.class)) {
hasFromXml = true;
} else if (method.getName().equals("toXml")
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(XmlWriter.class)) {
hasToXml = true;
}
if (hasFromXml && hasToXml) {
return true;
}
}
return false;
} | } | public static boolean supportsXmlSerializable(Class<?> bodyContentClass) {
if (FROM_XML_CACHE.containsKey(bodyContentClass)) {
return true;
}
if (!XmlSerializable.class.isAssignableFrom(bodyContentClass)) {
return false;
}
boolean hasFromXml = false;
boolean hasToXml = false;
for (Method method : bodyContentClass.getDeclaredMethods()) {
if (method.getName().equals("fromXml")
&& (method.getModifiers() & Modifier.STATIC) != 0
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(XmlReader.class)) {
hasFromXml = true;
} else if (method.getName().equals("toXml")
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(XmlWriter.class)) {
hasToXml = true;
}
if (hasFromXml && hasToXml) {
return true;
}
}
return false;
} | class ReflectionSerializable {
private static final ClientLogger LOGGER = new ClientLogger(ReflectionSerializable.class);
// Caches of reflective 'fromJson' / 'fromXml' invokers keyed by model class.
// Presence of a class in a cache also doubles as a fast-path "supported" marker.
private static final Map<Class<?>, ReflectiveInvoker> FROM_JSON_CACHE;
private static final Map<Class<?>, ReflectiveInvoker> FROM_XML_CACHE;
static {
FROM_JSON_CACHE = new ConcurrentHashMap<>();
FROM_XML_CACHE = new ConcurrentHashMap<>();
}
/**
 * Whether {@code JsonSerializable} is supported and the {@code bodyContentClass} is an instance of it.
 * <p>
 * In addition to implementing the interface, the class itself must declare both a static
 * {@code fromJson(JsonReader)} method and a {@code toJson(JsonWriter)} method.
 *
 * @param bodyContentClass The body content class.
 * @return Whether {@code bodyContentClass} can be used as {@code JsonSerializable}.
 */
public static boolean supportsJsonSerializable(Class<?> bodyContentClass) {
    // A cached invoker means the class was already validated.
    if (FROM_JSON_CACHE.containsKey(bodyContentClass)) {
        return true;
    }

    if (!JsonSerializable.class.isAssignableFrom(bodyContentClass)) {
        return false;
    }

    // Scan the class's own declared methods for both halves of the contract.
    boolean fromJsonFound = false;
    boolean toJsonFound = false;
    for (Method candidate : bodyContentClass.getDeclaredMethods()) {
        String methodName = candidate.getName();
        Class<?>[] parameterTypes = candidate.getParameterTypes();

        if ("fromJson".equals(methodName)
            && Modifier.isStatic(candidate.getModifiers())
            && parameterTypes.length == 1
            && parameterTypes[0] == JsonReader.class) {
            fromJsonFound = true;
        } else if ("toJson".equals(methodName)
            && parameterTypes.length == 1
            && parameterTypes[0] == JsonWriter.class) {
            toJsonFound = true;
        }

        if (fromJsonFound && toJsonFound) {
            return true;
        }
    }

    return false;
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} body content.
* @return The {@link ByteBuffer} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static ByteBuffer serializeJsonSerializableToByteBuffer(JsonSerializable<?> jsonSerializable)
throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, AccessibleByteArrayOutputStream::toByteBuffer);
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @return The {@code byte[]} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static byte[] serializeJsonSerializableToBytes(JsonSerializable<?> jsonSerializable) throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, AccessibleByteArrayOutputStream::toByteArray);
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @return The {@link String} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static String serializeJsonSerializableToString(JsonSerializable<?> jsonSerializable) throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, aos -> aos.toString(StandardCharsets.UTF_8));
}
// Serializes 'jsonSerializable' into an in-memory stream and converts the buffered
// bytes with 'returner' (byte[], ByteBuffer, or String view of the buffer).
private static <T> T serializeJsonSerializableWithReturn(JsonSerializable<?> jsonSerializable,
Function<AccessibleByteArrayOutputStream, T> returner) throws IOException {
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
// Flush before converting so all buffered output reaches the stream.
jsonWriter.writeJson(jsonSerializable).flush();
return returner.apply(outputStream);
}
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @param outputStream Where the serialized {@code JsonSerializable} will be written.
* @throws IOException If an error occurs during serialization.
*/
public static void serializeJsonSerializableIntoOutputStream(JsonSerializable<?> jsonSerializable,
OutputStream outputStream) throws IOException {
try (JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
jsonWriter.writeJson(jsonSerializable).flush();
}
}
/**
* Deserializes the {@code json} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} represented by the {@code json}.
* @param json The JSON being deserialized.
* @return An instance of {@code jsonSerializable} based on the {@code json}.
* @throws IOException If an error occurs during deserialization.
* @throws IllegalStateException If the {@code jsonSerializable} does not have a static {@code fromJson} method
* @throws Error If an error occurs during deserialization.
*/
public static Object deserializeAsJsonSerializable(Class<?> jsonSerializable, byte[] json) throws IOException {
    // Crude cache bound: wipe everything once it grows too large to prevent
    // unbounded growth from dynamically generated classes.
    if (FROM_JSON_CACHE.size() >= 10000) {
        FROM_JSON_CACHE.clear();
    }

    // Use the lambda parameter 'clazz' consistently (it is the cache key, the same
    // object as 'jsonSerializable'), matching the style of the XML counterpart.
    ReflectiveInvoker readJson = FROM_JSON_CACHE.computeIfAbsent(jsonSerializable, clazz -> {
        try {
            return ReflectionUtils.getMethodInvoker(clazz, clazz.getDeclaredMethod("fromJson", JsonReader.class));
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(e));
        }
    });

    try (JsonReader jsonReader = JsonProviders.createReader(json)) {
        return readJson.invokeStatic(jsonReader);
    } catch (Throwable e) {
        // Rethrow IOExceptions as-is, wrap other Exceptions, and let Errors propagate.
        if (e instanceof IOException) {
            throw (IOException) e;
        } else if (e instanceof Exception) {
            throw new IOException(e);
        } else {
            throw (Error) e;
        }
    }
}
/**
* Whether {@code XmlSerializable} is supported and the {@code bodyContentClass} is an instance of it.
*
* @param bodyContentClass The body content class.
* @return Whether {@code bodyContentClass} can be used as {@code XmlSerializable}.
*/
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@link ByteBuffer} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static ByteBuffer serializeXmlSerializableToByteBuffer(XmlSerializable<?> xmlSerializable)
throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, AccessibleByteArrayOutputStream::toByteBuffer);
}
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@code byte[]} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static byte[] serializeXmlSerializableToBytes(XmlSerializable<?> xmlSerializable) throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, AccessibleByteArrayOutputStream::toByteArray);
}
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@link String} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static String serializeXmlSerializableToString(XmlSerializable<?> xmlSerializable) throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, aos -> aos.toString(StandardCharsets.UTF_8));
}
// Serializes 'xmlSerializable' (with an XML declaration) into an in-memory stream
// and converts the buffered bytes with 'returner' (byte[], ByteBuffer, or String).
private static <T> T serializeXmlSerializableWithReturn(XmlSerializable<?> xmlSerializable,
Function<AccessibleByteArrayOutputStream, T> returner) throws IOException {
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
XmlWriter xmlWriter = XmlWriter.toStream(outputStream)) {
xmlWriter.writeStartDocument();
xmlWriter.writeXml(xmlSerializable);
// Flush before converting so all buffered output reaches the stream.
xmlWriter.flush();
return returner.apply(outputStream);
} catch (XMLStreamException ex) {
// Normalize XML streaming failures to IOException for a uniform contract.
throw new IOException(ex);
}
}
/**
* Serializes the {@code xmlSerializable} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} content.
* @param outputStream Where the serialized {@code XmlSerializable} will be written.
* @throws IOException If an error occurs during serialization.
*/
public static void serializeXmlSerializableIntoOutputStream(XmlSerializable<?> xmlSerializable,
OutputStream outputStream) throws IOException {
// Writes an XML declaration followed by the serialized content directly into
// 'outputStream'; the stream itself is left open for the caller to manage.
try (XmlWriter xmlWriter = XmlWriter.toStream(outputStream)) {
xmlWriter.writeStartDocument();
xmlWriter.writeXml(xmlSerializable);
xmlWriter.flush();
} catch (XMLStreamException ex) {
// Normalize XML streaming failures to IOException for a uniform contract.
throw new IOException(ex);
}
}
/**
* Deserializes the {@code xml} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} represented by the {@code xml}.
* @param xml The XML being deserialized.
* @return An instance of {@code xmlSerializable} based on the {@code xml}.
* @throws IOException If the XmlReader fails to close properly.
* @throws IllegalStateException If the {@code xmlSerializable} does not have a static {@code fromXml} method
* @throws Error If an error occurs during deserialization.
*/
public static Object deserializeAsXmlSerializable(Class<?> xmlSerializable, byte[] xml) throws IOException {
// Crude cache bound: wipe everything once it grows too large to prevent
// unbounded growth from dynamically generated classes.
if (FROM_XML_CACHE.size() >= 10000) {
FROM_XML_CACHE.clear();
}
// Look up (or reflectively resolve and cache) the static fromXml(XmlReader) invoker.
ReflectiveInvoker readXml = FROM_XML_CACHE.computeIfAbsent(xmlSerializable, clazz -> {
try {
return ReflectionUtils.getMethodInvoker(xmlSerializable,
xmlSerializable.getDeclaredMethod("fromXml", XmlReader.class));
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new IllegalStateException(e));
}
});
try (XmlReader xmlReader = XmlReader.fromBytes(xml)) {
return readXml.invokeStatic(xmlReader);
} catch (Throwable e) {
// Rethrow IOExceptions as-is, wrap other Exceptions, and let Errors propagate.
if (e instanceof IOException) {
throw (IOException) e;
} else if (e instanceof Exception) {
throw new IOException(e);
} else {
throw (Error) e;
}
}
}
// Private constructor - this is a static utility class and must not be instantiated.
private ReflectionSerializable() {
}
} | class ReflectionSerializable {
private static final ClientLogger LOGGER = new ClientLogger(ReflectionSerializable.class);
private static final Map<Class<?>, ReflectiveInvoker> FROM_JSON_CACHE;
private static final Map<Class<?>, ReflectiveInvoker> FROM_XML_CACHE;
static {
FROM_JSON_CACHE = new ConcurrentHashMap<>();
FROM_XML_CACHE = new ConcurrentHashMap<>();
}
/**
* Whether {@code JsonSerializable} is supported and the {@code bodyContentClass} is an instance of it.
*
* @param bodyContentClass The body content class.
* @return Whether {@code bodyContentClass} can be used as {@code JsonSerializable}.
*/
public static boolean supportsJsonSerializable(Class<?> bodyContentClass) {
if (FROM_JSON_CACHE.containsKey(bodyContentClass)) {
return true;
}
if (!JsonSerializable.class.isAssignableFrom(bodyContentClass)) {
return false;
}
boolean hasFromJson = false;
boolean hasToJson = false;
for (Method method : bodyContentClass.getDeclaredMethods()) {
if (method.getName().equals("fromJson")
&& (method.getModifiers() & Modifier.STATIC) != 0
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(JsonReader.class)) {
hasFromJson = true;
} else if (method.getName().equals("toJson")
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(JsonWriter.class)) {
hasToJson = true;
}
if (hasFromJson && hasToJson) {
return true;
}
}
return false;
}
/**
 * Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
 *
 * @param jsonSerializable The {@code JsonSerializable} body content.
 * @return The {@link ByteBuffer} representing the serialized {@code jsonSerializable}.
 * @throws IOException If an error occurs during serialization.
 */
public static ByteBuffer serializeJsonSerializableToByteBuffer(JsonSerializable<?> jsonSerializable)
    throws IOException {
    return serializeJsonSerializableWithReturn(jsonSerializable, AccessibleByteArrayOutputStream::toByteBuffer);
}

/**
 * Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
 *
 * @param jsonSerializable The {@code JsonSerializable} content.
 * @return The {@code byte[]} representing the serialized {@code jsonSerializable}.
 * @throws IOException If an error occurs during serialization.
 */
public static byte[] serializeJsonSerializableToBytes(JsonSerializable<?> jsonSerializable) throws IOException {
    return serializeJsonSerializableWithReturn(jsonSerializable, AccessibleByteArrayOutputStream::toByteArray);
}

/**
 * Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
 *
 * @param jsonSerializable The {@code JsonSerializable} content.
 * @return The {@link String} representing the serialized {@code jsonSerializable}.
 * @throws IOException If an error occurs during serialization.
 */
public static String serializeJsonSerializableToString(JsonSerializable<?> jsonSerializable) throws IOException {
    return serializeJsonSerializableWithReturn(jsonSerializable, aos -> aos.toString(StandardCharsets.UTF_8));
}

/*
 * Shared implementation for the serialize* overloads above: writes the JSON into an in-memory
 * stream, then applies 'returner' to extract the result in the caller's desired shape.
 */
private static <T> T serializeJsonSerializableWithReturn(JsonSerializable<?> jsonSerializable,
    Function<AccessibleByteArrayOutputStream, T> returner) throws IOException {
    try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
        JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
        // The explicit flush() matters: 'returner' reads the stream contents before the
        // try-with-resources closes the writer, so buffered output must be pushed out first.
        jsonWriter.writeJson(jsonSerializable).flush();

        return returner.apply(outputStream);
    }
}

/**
 * Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
 *
 * @param jsonSerializable The {@code JsonSerializable} content.
 * @param outputStream Where the serialized {@code JsonSerializable} will be written.
 * @throws IOException If an error occurs during serialization.
 */
public static void serializeJsonSerializableIntoOutputStream(JsonSerializable<?> jsonSerializable,
    OutputStream outputStream) throws IOException {
    // The caller owns 'outputStream'; only the writer wrapping it is closed here.
    try (JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
        jsonWriter.writeJson(jsonSerializable).flush();
    }
}
/**
 * Deserializes the {@code json} as an instance of {@code JsonSerializable}.
 *
 * @param jsonSerializable The {@code JsonSerializable} represented by the {@code json}.
 * @param json The JSON being deserialized.
 * @return An instance of {@code jsonSerializable} based on the {@code json}.
 * @throws IOException If an error occurs during deserialization.
 * @throws IllegalStateException If the {@code jsonSerializable} does not have a static {@code fromJson} method
 * @throws Error If an error occurs during deserialization.
 */
public static Object deserializeAsJsonSerializable(Class<?> jsonSerializable, byte[] json) throws IOException {
    // Crude upper bound that keeps a pathological number of distinct types from growing the
    // cache without bound. Clearing is cheap; evicted invokers are recomputed on demand.
    if (FROM_JSON_CACHE.size() >= 10000) {
        FROM_JSON_CACHE.clear();
    }

    ReflectiveInvoker readJson = FROM_JSON_CACHE.computeIfAbsent(jsonSerializable, clazz -> {
        try {
            // Use the lambda parameter consistently (previously this mixed the captured
            // 'jsonSerializable' with 'clazz'; they are the same value, but the mix was
            // inconsistent with the XML variant and made the lambda needlessly capturing).
            return ReflectionUtils.getMethodInvoker(clazz, clazz.getDeclaredMethod("fromJson", JsonReader.class));
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(e));
        }
    });

    try (JsonReader jsonReader = JsonProviders.createReader(json)) {
        return readJson.invokeStatic(jsonReader);
    } catch (Throwable e) {
        // The reflective invoker can surface any Throwable. Unwrap to honor this method's
        // contract: IOExceptions pass through, other Exceptions are wrapped in IOException,
        // and anything else is rethrown as an Error.
        if (e instanceof IOException) {
            throw (IOException) e;
        } else if (e instanceof Exception) {
            throw new IOException(e);
        } else {
            throw (Error) e;
        }
    }
}
// NOTE(review): the Javadoc block that was here documented 'supportsXmlSerializable'
// ("Whether {@code XmlSerializable} is supported and the {@code bodyContentClass} is an
// instance of it."), but no such method is declared at this point in the file — the Javadoc
// is orphaned. Confirm whether the method was moved or removed and restore/delete accordingly.
/**
 * Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
 *
 * @param xmlSerializable The {@code XmlSerializable} body content.
 * @return The {@link ByteBuffer} representing the serialized {@code bodyContent}.
 * @throws IOException If the XmlWriter fails to close properly.
 */
public static ByteBuffer serializeXmlSerializableToByteBuffer(XmlSerializable<?> xmlSerializable)
    throws IOException {
    return serializeXmlSerializableWithReturn(xmlSerializable, AccessibleByteArrayOutputStream::toByteBuffer);
}

/**
 * Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
 *
 * @param xmlSerializable The {@code XmlSerializable} body content.
 * @return The {@code byte[]} representing the serialized {@code bodyContent}.
 * @throws IOException If the XmlWriter fails to close properly.
 */
public static byte[] serializeXmlSerializableToBytes(XmlSerializable<?> xmlSerializable) throws IOException {
    return serializeXmlSerializableWithReturn(xmlSerializable, AccessibleByteArrayOutputStream::toByteArray);
}

/**
 * Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
 *
 * @param xmlSerializable The {@code XmlSerializable} body content.
 * @return The {@link String} representing the serialized {@code bodyContent}.
 * @throws IOException If the XmlWriter fails to close properly.
 */
public static String serializeXmlSerializableToString(XmlSerializable<?> xmlSerializable) throws IOException {
    return serializeXmlSerializableWithReturn(xmlSerializable, aos -> aos.toString(StandardCharsets.UTF_8));
}

/*
 * Shared implementation for the serializeXml* overloads above: writes the XML document into an
 * in-memory stream, then applies 'returner' to extract the result in the caller's desired shape.
 * XMLStreamExceptions are normalized to IOException so callers only deal with one failure type.
 */
private static <T> T serializeXmlSerializableWithReturn(XmlSerializable<?> xmlSerializable,
    Function<AccessibleByteArrayOutputStream, T> returner) throws IOException {
    try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
        XmlWriter xmlWriter = XmlWriter.toStream(outputStream)) {
        xmlWriter.writeStartDocument();
        xmlWriter.writeXml(xmlSerializable);
        // Flush before 'returner' reads the stream, while the writer is still open.
        xmlWriter.flush();

        return returner.apply(outputStream);
    } catch (XMLStreamException ex) {
        throw new IOException(ex);
    }
}

/**
 * Serializes the {@code xmlSerializable} as an instance of {@code XmlSerializable}.
 *
 * @param xmlSerializable The {@code XmlSerializable} content.
 * @param outputStream Where the serialized {@code XmlSerializable} will be written.
 * @throws IOException If an error occurs during serialization.
 */
public static void serializeXmlSerializableIntoOutputStream(XmlSerializable<?> xmlSerializable,
    OutputStream outputStream) throws IOException {
    // The caller owns 'outputStream'; only the writer wrapping it is closed here.
    try (XmlWriter xmlWriter = XmlWriter.toStream(outputStream)) {
        xmlWriter.writeStartDocument();
        xmlWriter.writeXml(xmlSerializable);
        xmlWriter.flush();
    } catch (XMLStreamException ex) {
        throw new IOException(ex);
    }
}
/**
 * Deserializes the {@code xml} as an instance of {@code XmlSerializable}.
 *
 * @param xmlSerializable The {@code XmlSerializable} represented by the {@code xml}.
 * @param xml The XML being deserialized.
 * @return An instance of {@code xmlSerializable} based on the {@code xml}.
 * @throws IOException If the XmlReader fails to close properly.
 * @throws IllegalStateException If the {@code xmlSerializable} does not have a static {@code fromXml} method
 * @throws Error If an error occurs during deserialization.
 */
public static Object deserializeAsXmlSerializable(Class<?> xmlSerializable, byte[] xml) throws IOException {
    // Crude upper bound that keeps a pathological number of distinct types from growing the
    // cache without bound. Clearing is cheap; evicted invokers are recomputed on demand.
    if (FROM_XML_CACHE.size() >= 10000) {
        FROM_XML_CACHE.clear();
    }

    ReflectiveInvoker readXml = FROM_XML_CACHE.computeIfAbsent(xmlSerializable, clazz -> {
        try {
            // Use the lambda parameter consistently instead of capturing 'xmlSerializable';
            // they are the same value, and this matches the JSON counterpart.
            return ReflectionUtils.getMethodInvoker(clazz, clazz.getDeclaredMethod("fromXml", XmlReader.class));
        } catch (Exception e) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(e));
        }
    });

    try (XmlReader xmlReader = XmlReader.fromBytes(xml)) {
        return readXml.invokeStatic(xmlReader);
    } catch (Throwable e) {
        // The reflective invoker can surface any Throwable. Unwrap to honor this method's
        // contract: IOExceptions pass through, other Exceptions are wrapped in IOException,
        // and anything else is rethrown as an Error.
        if (e instanceof IOException) {
            throw (IOException) e;
        } else if (e instanceof Exception) {
            throw new IOException(e);
        } else {
            throw (Error) e;
        }
    }
}
// Private constructor - this is a static utility class and must never be instantiated.
private ReflectionSerializable() {
}
} |
Yes, it is — this package has gradually morphed from being a service-provider implementation of `JsonSerializer` backed by Jackson into also including a replacement for JacksonAdapter. | public byte[] serializeToBytes(Object object, SerializerEncoding encoding) throws IOException {
if (object == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
return supportsXmlSerializable(object.getClass())
? serializeXmlSerializableToBytes((XmlSerializable<?>) object)
: getXmlMapper().writeValueAsBytes(object);
} else if (encoding == SerializerEncoding.TEXT) {
return object.toString().getBytes(StandardCharsets.UTF_8);
} else {
return supportsJsonSerializable(object.getClass())
? serializeJsonSerializableToBytes((JsonSerializable<?>) object)
: mapper.writeValueAsBytes(object);
}
} | ? serializeXmlSerializableToBytes((XmlSerializable<?>) object) | public byte[] serializeToBytes(Object object, SerializerEncoding encoding) throws IOException {
if (object == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
return supportsXmlSerializable(object.getClass())
? serializeXmlSerializableToBytes((XmlSerializable<?>) object)
: getXmlMapper().writeValueAsBytes(object);
} else if (encoding == SerializerEncoding.TEXT) {
return object.toString().getBytes(StandardCharsets.UTF_8);
} else {
return supportsJsonSerializable(object.getClass())
? serializeJsonSerializableToBytes((JsonSerializable<?>) object)
: mapper.writeValueAsBytes(object);
}
} | class JacksonAdapter implements SerializerAdapter {
private static final ClientLogger LOGGER = new ClientLogger(JacksonAdapter.class);
/*
 * JVM-wide XmlMapper holder. A single-constant enum is used so initialization is lazy (the
 * enum class initializes on first reference) and thread-safe without explicit locking.
 */
private enum GlobalXmlMapper {
    XML_MAPPER(ObjectMapperShim.createXmlMapper());

    private final ObjectMapperShim xmlMapper;

    GlobalXmlMapper(ObjectMapperShim xmlMapper) {
        this.xmlMapper = xmlMapper;
    }

    private ObjectMapperShim getXmlMapper() {
        return xmlMapper;
    }
}

/*
 * JVM-wide default JacksonAdapter, held the same lazy enum-holder way as GlobalXmlMapper.
 * Backs defaultSerializerAdapter().
 */
private enum GlobalSerializerAdapter {
    SERIALIZER_ADAPTER(new JacksonAdapter());

    private final SerializerAdapter serializerAdapter;

    GlobalSerializerAdapter(SerializerAdapter serializerAdapter) {
        this.serializerAdapter = serializerAdapter;
    }

    private SerializerAdapter getSerializerAdapter() {
        return serializerAdapter;
    }
}

/**
 * An instance of {@link ObjectMapperShim} to serialize/deserialize objects.
 */
private final ObjectMapperShim mapper;

// Mapper used exclusively by the header-deserialization methods below.
private final ObjectMapperShim headerMapper;

// Private: instances are only created through the GlobalSerializerAdapter holder above.
private JacksonAdapter() {
    this.headerMapper = ObjectMapperShim.createHeaderMapper();
    this.mapper = ObjectMapperShim.createJsonMapper(ObjectMapperShim.createSimpleMapper());
}
/**
 * Maintains the singleton instance of the default serializer adapter.
 *
 * @return the default serializer
 */
public static SerializerAdapter defaultSerializerAdapter() {
    return GlobalSerializerAdapter.SERIALIZER_ADAPTER.getSerializerAdapter();
}
/**
 * Serializes {@code object} to a {@link String} using the requested {@code encoding}.
 * A null {@code object} serializes to {@code null}. For XML and JSON, the stream-based
 * {@code XmlSerializable}/{@code JsonSerializable} path is preferred when the type supports
 * it; otherwise Jackson is used. TEXT simply uses {@code toString()}.
 *
 * @param object The object to serialize, may be null.
 * @param encoding The wire encoding to produce (XML, TEXT, or JSON by default).
 * @return The serialized form, or null when {@code object} is null.
 * @throws IOException If an error occurs during serialization.
 */
@Override
public String serialize(Object object, SerializerEncoding encoding) throws IOException {
    if (object == null) {
        return null;
    }

    if (encoding == SerializerEncoding.TEXT) {
        return object.toString();
    }

    if (encoding == SerializerEncoding.XML) {
        if (supportsXmlSerializable(object.getClass())) {
            return serializeXmlSerializableToString((XmlSerializable<?>) object);
        }

        return getXmlMapper().writeValueAsString(object);
    }

    // Default encoding is JSON.
    if (supportsJsonSerializable(object.getClass())) {
        return serializeJsonSerializableToString((JsonSerializable<?>) object);
    }

    return mapper.writeValueAsString(object);
}
/**
 * Serializes {@code object} into {@code outputStream} using the requested {@code encoding}.
 * A null {@code object} is a no-op; nothing is written. The caller retains ownership of
 * {@code outputStream}.
 *
 * @param object The object to serialize, may be null.
 * @param encoding The wire encoding to produce (XML, TEXT, or JSON by default).
 * @param outputStream The stream the serialized form is written to.
 * @throws IOException If an error occurs during serialization.
 */
@Override
public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException {
    if (object == null) {
        return;
    }

    if (encoding == SerializerEncoding.XML) {
        // Prefer the stream-based XmlSerializable path when the type supports it.
        if (supportsXmlSerializable(object.getClass())) {
            serializeXmlSerializableIntoOutputStream((XmlSerializable<?>) object, outputStream);
        } else {
            getXmlMapper().writeValue(outputStream, object);
        }
    } else if (encoding == SerializerEncoding.TEXT) {
        outputStream.write(object.toString().getBytes(StandardCharsets.UTF_8));
    } else {
        if (supportsJsonSerializable(object.getClass())) {
            serializeJsonSerializableIntoOutputStream((JsonSerializable<?>) object, outputStream);
        } else {
            mapper.writeValue(outputStream, object);
        }
    }
}
/**
 * Serializes {@code object} as JSON and strips all leading and trailing double quotes from
 * the result. A null {@code object} serializes to {@code null}.
 *
 * @param object The object to serialize, may be null.
 * @return The quote-stripped JSON form, or null when {@code object} is null.
 * @throws UncheckedIOException If the underlying JSON serialization fails; this method's
 * signature cannot throw the checked IOException.
 */
@Override
public String serializeRaw(Object object) {
    if (object == null) {
        return null;
    }

    try {
        return removeLeadingAndTrailingQuotes(serialize(object, SerializerEncoding.JSON));
    } catch (IOException ex) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
    }
}
/*
 * Used by 'serializeRaw' to strip every leading and trailing double-quote character (").
 * A string consisting solely of quotes becomes the empty string.
 */
static String removeLeadingAndTrailingQuotes(String str) {
    int start = 0;
    int end = str.length();

    // Advance past all leading quotes.
    while (start < end && str.charAt(start) == '"') {
        start++;
    }

    // Retreat past all trailing quotes; bounded below by 'start' so the indices never cross.
    while (end > start && str.charAt(end - 1) == '"') {
        end--;
    }

    return str.substring(start, end);
}
@Override
public String serializeList(List<?> list, CollectionFormat format) {
    // Lists need no special handling; delegate to serializeIterable (declared on the
    // SerializerAdapter interface - not visible in this file).
    return serializeIterable(list, format);
}
/**
 * Deserializes {@code value} into {@code type} using the given {@code encoding}.
 * A null or empty {@code value} deserializes to {@code null}.
 *
 * @param value The serialized content.
 * @param type The target type.
 * @param encoding The wire encoding of {@code value}.
 * @return The deserialized object, or null for null/empty input.
 * @throws IOException If deserialization fails.
 */
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException {
    if (CoreUtils.isNullOrEmpty(value)) {
        return null;
    }

    if (encoding == SerializerEncoding.XML) {
        // Prefer the reflective XmlSerializable path when the raw type supports it.
        Class<?> rawClass = TypeUtil.getRawClass(type);
        return supportsXmlSerializable(rawClass)
            ? (T) deserializeAsXmlSerializable(rawClass, value.getBytes(StandardCharsets.UTF_8))
            : getXmlMapper().readValue(value, type);
    } else if (encoding == SerializerEncoding.TEXT) {
        return (T) deserializeText(value, type);
    } else {
        // Default encoding is JSON.
        Class<?> rawClass = TypeUtil.getRawClass(type);
        return supportsJsonSerializable(rawClass)
            ? (T) deserializeAsJsonSerializable(rawClass, value.getBytes(StandardCharsets.UTF_8))
            : mapper.readValue(value, type);
    }
}

/**
 * Deserializes {@code bytes} into {@code type} using the given {@code encoding}.
 * A null or zero-length array deserializes to {@code null}.
 *
 * @param bytes The serialized content.
 * @param type The target type.
 * @param encoding The wire encoding of {@code bytes}.
 * @return The deserialized object, or null for null/empty input.
 * @throws IOException If deserialization fails.
 */
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
    if (bytes == null || bytes.length == 0) {
        return null;
    }

    if (encoding == SerializerEncoding.XML) {
        Class<?> rawClass = TypeUtil.getRawClass(type);
        return supportsXmlSerializable(rawClass)
            ? (T) deserializeAsXmlSerializable(rawClass, bytes)
            : getXmlMapper().readValue(bytes, type);
    } else if (encoding == SerializerEncoding.TEXT) {
        // bomAwareToString handles a possible byte-order mark before decoding.
        return (T) deserializeText(CoreUtils.bomAwareToString(bytes, null), type);
    } else {
        Class<?> rawClass = TypeUtil.getRawClass(type);
        return supportsJsonSerializable(rawClass)
            ? (T) deserializeAsJsonSerializable(rawClass, bytes)
            : mapper.readValue(bytes, type);
    }
}
/**
 * Deserializes the contents of {@code inputStream} into {@code type} using the given
 * {@code encoding}. A null stream deserializes to {@code null}. The stream is fully
 * consumed but not closed by this method.
 *
 * @param inputStream The stream holding the serialized content.
 * @param type The target type.
 * @param encoding The wire encoding of the stream contents.
 * @return The deserialized object, or null when {@code inputStream} is null.
 * @throws IOException If reading the stream or deserialization fails.
 */
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException {
    if (inputStream == null) {
        return null;
    }

    if (encoding == SerializerEncoding.XML) {
        Class<?> rawClass = TypeUtil.getRawClass(type);
        return supportsXmlSerializable(rawClass)
            ? (T) deserializeAsXmlSerializable(rawClass, inputStreamToBytes(inputStream))
            : getXmlMapper().readValue(inputStream, type);
    } else if (encoding == SerializerEncoding.TEXT) {
        // NOTE(review): this drain loop duplicates inputStreamToBytes. It is kept inline so
        // the AccessibleByteArrayOutputStream's own bomAwareToString can be used directly -
        // presumably avoiding a byte[] copy; confirm before deduplicating.
        AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
        byte[] buffer = new byte[8192];
        int readCount;
        while ((readCount = inputStream.read(buffer)) != -1) {
            outputStream.write(buffer, 0, readCount);
        }

        return (T) deserializeText(outputStream.bomAwareToString(null), type);
    } else {
        Class<?> rawClass = TypeUtil.getRawClass(type);
        return supportsJsonSerializable(rawClass)
            ? (T) deserializeAsJsonSerializable(rawClass, inputStreamToBytes(inputStream))
            : mapper.readValue(inputStream, type);
    }
}
/*
 * Fully drains 'inputStream' into an in-memory buffer, 8 KB at a time, and returns the bytes.
 * The stream is consumed but not closed.
 */
private static byte[] inputStreamToBytes(InputStream inputStream) throws IOException {
    AccessibleByteArrayOutputStream accumulator = new AccessibleByteArrayOutputStream();
    byte[] chunk = new byte[8192];

    for (int read = inputStream.read(chunk); read != -1; read = inputStream.read(chunk)) {
        accumulator.write(chunk, 0, read);
    }

    return accumulator.toByteArray();
}
/*
 * Converts a raw text payload into the requested target type. Supports primitives and their
 * wrappers, byte[], common temporal/identifier types, Enum, and ExpandableStringEnum.
 * Throws IllegalStateException for any other type.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
private static Object deserializeText(String value, Type type) throws IOException {
    if (type == String.class || type == CharSequence.class) {
        return value;
    } else if (type == int.class || type == Integer.class) {
        return Integer.parseInt(value);
    } else if (type == char.class || type == Character.class) {
        return CoreUtils.isNullOrEmpty(value) ? null : value.charAt(0);
    } else if (type == byte.class || type == Byte.class) {
        // Takes the first character's code point as the byte value, not Byte.parseByte.
        return CoreUtils.isNullOrEmpty(value) ? null : (byte) value.charAt(0);
    } else if (type == byte[].class) {
        return CoreUtils.isNullOrEmpty(value) ? null : value.getBytes(StandardCharsets.UTF_8);
    } else if (type == long.class || type == Long.class) {
        return Long.parseLong(value);
    } else if (type == short.class || type == Short.class) {
        return Short.parseShort(value);
    } else if (type == float.class || type == Float.class) {
        return Float.parseFloat(value);
    } else if (type == double.class || type == Double.class) {
        return Double.parseDouble(value);
    } else if (type == boolean.class || type == Boolean.class) {
        return Boolean.parseBoolean(value);
    } else if (type == OffsetDateTime.class) {
        return OffsetDateTime.parse(value);
    } else if (type == DateTimeRfc1123.class) {
        return new DateTimeRfc1123(value);
    } else if (type == URL.class) {
        try {
            return ImplUtils.createUrl(value);
        } catch (MalformedURLException ex) {
            // Normalize to IOException to match this method's declared failure type.
            throw new IOException(ex);
        }
    } else if (type == URI.class) {
        return URI.create(value);
    } else if (type == UUID.class) {
        return UUID.fromString(value);
    } else if (type == LocalDate.class) {
        return LocalDate.parse(value);
    } else if (Enum.class.isAssignableFrom((Class<?>) type)) {
        return Enum.valueOf((Class) type, value);
    } else if (ExpandableStringEnum.class.isAssignableFrom((Class<?>) type)) {
        // ExpandableStringEnum subtypes expose a static fromString(String) factory by
        // convention; invoke it reflectively.
        try {
            return ((Class<?>) type).getDeclaredMethod("fromString", String.class).invoke(null, value);
        } catch (ReflectiveOperationException ex) {
            throw new IOException(ex);
        }
    } else {
        throw new IllegalStateException("Unsupported text Content-Type Type: " + type);
    }
}
@Override
public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException {
    // Headers use a dedicated mapper configured by ObjectMapperShim.createHeaderMapper().
    return headerMapper.deserialize(headers, deserializedHeadersType);
}

@Override
public <T> T deserializeHeader(Header header, Type type) throws IOException {
    return headerMapper.readValue(header.getValue(), type);
}

// Accessor for the lazily-initialized, JVM-wide XmlMapper held by the GlobalXmlMapper enum.
private ObjectMapperShim getXmlMapper() {
    return GlobalXmlMapper.XML_MAPPER.getXmlMapper();
}
} | class JacksonAdapter implements SerializerAdapter {
private static final ClientLogger LOGGER = new ClientLogger(JacksonAdapter.class);
private enum GlobalXmlMapper {
XML_MAPPER(ObjectMapperShim.createXmlMapper());
private final ObjectMapperShim xmlMapper;
GlobalXmlMapper(ObjectMapperShim xmlMapper) {
this.xmlMapper = xmlMapper;
}
private ObjectMapperShim getXmlMapper() {
return xmlMapper;
}
}
private enum GlobalSerializerAdapter {
SERIALIZER_ADAPTER(new JacksonAdapter());
private final SerializerAdapter serializerAdapter;
GlobalSerializerAdapter(SerializerAdapter serializerAdapter) {
this.serializerAdapter = serializerAdapter;
}
private SerializerAdapter getSerializerAdapter() {
return serializerAdapter;
}
}
/**
* An instance of {@link ObjectMapperShim} to serialize/deserialize objects.
*/
private final ObjectMapperShim mapper;
private final ObjectMapperShim headerMapper;
private JacksonAdapter() {
this.headerMapper = ObjectMapperShim.createHeaderMapper();
this.mapper = ObjectMapperShim.createJsonMapper(ObjectMapperShim.createSimpleMapper());
}
/**
* maintain singleton instance of the default serializer adapter.
*
* @return the default serializer
*/
public static SerializerAdapter defaultSerializerAdapter() {
return GlobalSerializerAdapter.SERIALIZER_ADAPTER.getSerializerAdapter();
}
@Override
public String serialize(Object object, SerializerEncoding encoding) throws IOException {
if (object == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
return supportsXmlSerializable(object.getClass())
? serializeXmlSerializableToString((XmlSerializable<?>) object)
: getXmlMapper().writeValueAsString(object);
} else if (encoding == SerializerEncoding.TEXT) {
return object.toString();
} else {
return supportsJsonSerializable(object.getClass())
? serializeJsonSerializableToString((JsonSerializable<?>) object)
: mapper.writeValueAsString(object);
}
}
/**
 * Serializes {@code object} into {@code outputStream} using the requested {@code encoding}.
 * A null {@code object} is a no-op; nothing is written. The caller retains ownership of
 * {@code outputStream}.
 *
 * @param object The object to serialize, may be null.
 * @param encoding The wire encoding to produce (XML, TEXT, or JSON by default).
 * @param outputStream The stream the serialized form is written to.
 * @throws IOException If an error occurs during serialization.
 */
@Override
public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException {
    if (object == null) {
        return;
    }

    if (encoding == SerializerEncoding.XML) {
        // Prefer the stream-based XmlSerializable path when the type supports it.
        if (supportsXmlSerializable(object.getClass())) {
            serializeXmlSerializableIntoOutputStream((XmlSerializable<?>) object, outputStream);
        } else {
            getXmlMapper().writeValue(outputStream, object);
        }
    } else if (encoding == SerializerEncoding.TEXT) {
        outputStream.write(object.toString().getBytes(StandardCharsets.UTF_8));
    } else {
        if (supportsJsonSerializable(object.getClass())) {
            serializeJsonSerializableIntoOutputStream((JsonSerializable<?>) object, outputStream);
        } else {
            mapper.writeValue(outputStream, object);
        }
    }
}
@Override
public String serializeRaw(Object object) {
if (object == null) {
return null;
}
try {
return removeLeadingAndTrailingQuotes(serialize(object, SerializerEncoding.JSON));
} catch (IOException ex) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
}
}
/*
* Used by 'serializeRaw' to removal all leading and trailing quotes (").
*/
static String removeLeadingAndTrailingQuotes(String str) {
int strLength = str.length();
int startOffset = 0;
while (startOffset < strLength) {
if (str.charAt(startOffset) != '"') {
break;
}
startOffset++;
}
if (startOffset == strLength) {
return "";
}
int endOffset = strLength - 1;
while (endOffset >= 0) {
if (str.charAt(endOffset) != '"') {
break;
}
endOffset--;
}
return str.substring(startOffset, endOffset + 1);
}
@Override
public String serializeList(List<?> list, CollectionFormat format) {
return serializeIterable(list, format);
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException {
if (CoreUtils.isNullOrEmpty(value)) {
return null;
}
if (encoding == SerializerEncoding.XML) {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsXmlSerializable(rawClass)
? (T) deserializeAsXmlSerializable(rawClass, value.getBytes(StandardCharsets.UTF_8))
: getXmlMapper().readValue(value, type);
} else if (encoding == SerializerEncoding.TEXT) {
return (T) deserializeText(value, type);
} else {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsJsonSerializable(rawClass)
? (T) deserializeAsJsonSerializable(rawClass, value.getBytes(StandardCharsets.UTF_8))
: mapper.readValue(value, type);
}
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
if (bytes == null || bytes.length == 0) {
return null;
}
if (encoding == SerializerEncoding.XML) {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsXmlSerializable(rawClass)
? (T) deserializeAsXmlSerializable(rawClass, bytes)
: getXmlMapper().readValue(bytes, type);
} else if (encoding == SerializerEncoding.TEXT) {
return (T) deserializeText(CoreUtils.bomAwareToString(bytes, null), type);
} else {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsJsonSerializable(rawClass)
? (T) deserializeAsJsonSerializable(rawClass, bytes)
: mapper.readValue(bytes, type);
}
}
@SuppressWarnings("unchecked")
@Override
public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException {
if (inputStream == null) {
return null;
}
if (encoding == SerializerEncoding.XML) {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsXmlSerializable(rawClass)
? (T) deserializeAsXmlSerializable(rawClass, inputStreamToBytes(inputStream))
: getXmlMapper().readValue(inputStream, type);
} else if (encoding == SerializerEncoding.TEXT) {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
byte[] buffer = new byte[8192];
int readCount;
while ((readCount = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, readCount);
}
return (T) deserializeText(outputStream.bomAwareToString(null), type);
} else {
Class<?> rawClass = TypeUtil.getRawClass(type);
return supportsJsonSerializable(rawClass)
? (T) deserializeAsJsonSerializable(rawClass, inputStreamToBytes(inputStream))
: mapper.readValue(inputStream, type);
}
}
private static byte[] inputStreamToBytes(InputStream inputStream) throws IOException {
AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
byte[] buffer = new byte[8192];
int readCount;
while ((readCount = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, readCount);
}
return outputStream.toByteArray();
}
@SuppressWarnings({ "unchecked", "rawtypes" })
private static Object deserializeText(String value, Type type) throws IOException {
if (type == String.class || type == CharSequence.class) {
return value;
} else if (type == int.class || type == Integer.class) {
return Integer.parseInt(value);
} else if (type == char.class || type == Character.class) {
return CoreUtils.isNullOrEmpty(value) ? null : value.charAt(0);
} else if (type == byte.class || type == Byte.class) {
return CoreUtils.isNullOrEmpty(value) ? null : (byte) value.charAt(0);
} else if (type == byte[].class) {
return CoreUtils.isNullOrEmpty(value) ? null : value.getBytes(StandardCharsets.UTF_8);
} else if (type == long.class || type == Long.class) {
return Long.parseLong(value);
} else if (type == short.class || type == Short.class) {
return Short.parseShort(value);
} else if (type == float.class || type == Float.class) {
return Float.parseFloat(value);
} else if (type == double.class || type == Double.class) {
return Double.parseDouble(value);
} else if (type == boolean.class || type == Boolean.class) {
return Boolean.parseBoolean(value);
} else if (type == OffsetDateTime.class) {
return OffsetDateTime.parse(value);
} else if (type == DateTimeRfc1123.class) {
return new DateTimeRfc1123(value);
} else if (type == URL.class) {
try {
return ImplUtils.createUrl(value);
} catch (MalformedURLException ex) {
throw new IOException(ex);
}
} else if (type == URI.class) {
return URI.create(value);
} else if (type == UUID.class) {
return UUID.fromString(value);
} else if (type == LocalDate.class) {
return LocalDate.parse(value);
} else if (Enum.class.isAssignableFrom((Class<?>) type)) {
return Enum.valueOf((Class) type, value);
} else if (ExpandableStringEnum.class.isAssignableFrom((Class<?>) type)) {
try {
return ((Class<?>) type).getDeclaredMethod("fromString", String.class).invoke(null, value);
} catch (ReflectiveOperationException ex) {
throw new IOException(ex);
}
} else {
throw new IllegalStateException("Unsupported text Content-Type Type: " + type);
}
}
@Override
public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException {
return headerMapper.deserialize(headers, deserializedHeadersType);
}
@Override
public <T> T deserializeHeader(Header header, Type type) throws IOException {
return headerMapper.readValue(header.getValue(), type);
}
private ObjectMapperShim getXmlMapper() {
return GlobalXmlMapper.XML_MAPPER.getXmlMapper();
}
} |
This is done to handle cases where a class extends a class implementing XmlSerializable but doesn't implement the interface methods itself. This was found to be an issue with JsonSerializable, which is why the same logic is being used here. | public static boolean supportsXmlSerializable(Class<?> bodyContentClass) {
if (FROM_XML_CACHE.containsKey(bodyContentClass)) {
return true;
}
if (!XmlSerializable.class.isAssignableFrom(bodyContentClass)) {
return false;
}
boolean hasFromXml = false;
boolean hasToXml = false;
for (Method method : bodyContentClass.getDeclaredMethods()) {
if (method.getName().equals("fromXml")
&& (method.getModifiers() & Modifier.STATIC) != 0
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(XmlReader.class)) {
hasFromXml = true;
} else if (method.getName().equals("toXml")
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(XmlWriter.class)) {
hasToXml = true;
}
if (hasFromXml && hasToXml) {
return true;
}
}
return false;
} | } | public static boolean supportsXmlSerializable(Class<?> bodyContentClass) {
if (FROM_XML_CACHE.containsKey(bodyContentClass)) {
return true;
}
if (!XmlSerializable.class.isAssignableFrom(bodyContentClass)) {
return false;
}
boolean hasFromXml = false;
boolean hasToXml = false;
for (Method method : bodyContentClass.getDeclaredMethods()) {
if (method.getName().equals("fromXml")
&& (method.getModifiers() & Modifier.STATIC) != 0
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(XmlReader.class)) {
hasFromXml = true;
} else if (method.getName().equals("toXml")
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(XmlWriter.class)) {
hasToXml = true;
}
if (hasFromXml && hasToXml) {
return true;
}
}
return false;
} | class ReflectionSerializable {
private static final ClientLogger LOGGER = new ClientLogger(ReflectionSerializable.class);
private static final Map<Class<?>, ReflectiveInvoker> FROM_JSON_CACHE;
private static final Map<Class<?>, ReflectiveInvoker> FROM_XML_CACHE;
static {
FROM_JSON_CACHE = new ConcurrentHashMap<>();
FROM_XML_CACHE = new ConcurrentHashMap<>();
}
/**
* Whether {@code JsonSerializable} is supported and the {@code bodyContentClass} is an instance of it.
*
* @param bodyContentClass The body content class.
* @return Whether {@code bodyContentClass} can be used as {@code JsonSerializable}.
*/
public static boolean supportsJsonSerializable(Class<?> bodyContentClass) {
if (FROM_JSON_CACHE.containsKey(bodyContentClass)) {
return true;
}
if (!JsonSerializable.class.isAssignableFrom(bodyContentClass)) {
return false;
}
boolean hasFromJson = false;
boolean hasToJson = false;
for (Method method : bodyContentClass.getDeclaredMethods()) {
if (method.getName().equals("fromJson")
&& (method.getModifiers() & Modifier.STATIC) != 0
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(JsonReader.class)) {
hasFromJson = true;
} else if (method.getName().equals("toJson")
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(JsonWriter.class)) {
hasToJson = true;
}
if (hasFromJson && hasToJson) {
return true;
}
}
return false;
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} body content.
* @return The {@link ByteBuffer} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static ByteBuffer serializeJsonSerializableToByteBuffer(JsonSerializable<?> jsonSerializable)
throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, AccessibleByteArrayOutputStream::toByteBuffer);
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @return The {@code byte[]} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static byte[] serializeJsonSerializableToBytes(JsonSerializable<?> jsonSerializable) throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, AccessibleByteArrayOutputStream::toByteArray);
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @return The {@link String} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static String serializeJsonSerializableToString(JsonSerializable<?> jsonSerializable) throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, aos -> aos.toString(StandardCharsets.UTF_8));
}
private static <T> T serializeJsonSerializableWithReturn(JsonSerializable<?> jsonSerializable,
Function<AccessibleByteArrayOutputStream, T> returner) throws IOException {
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
jsonWriter.writeJson(jsonSerializable).flush();
return returner.apply(outputStream);
}
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @param outputStream Where the serialized {@code JsonSerializable} will be written.
* @throws IOException If an error occurs during serialization.
*/
public static void serializeJsonSerializableIntoOutputStream(JsonSerializable<?> jsonSerializable,
OutputStream outputStream) throws IOException {
try (JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
jsonWriter.writeJson(jsonSerializable).flush();
}
}
/**
* Deserializes the {@code json} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} represented by the {@code json}.
* @param json The JSON being deserialized.
* @return An instance of {@code jsonSerializable} based on the {@code json}.
* @throws IOException If an error occurs during deserialization.
* @throws IllegalStateException If the {@code jsonSerializable} does not have a static {@code fromJson} method
* @throws Error If an error occurs during deserialization.
*/
public static Object deserializeAsJsonSerializable(Class<?> jsonSerializable, byte[] json) throws IOException {
if (FROM_JSON_CACHE.size() >= 10000) {
FROM_JSON_CACHE.clear();
}
ReflectiveInvoker readJson = FROM_JSON_CACHE.computeIfAbsent(jsonSerializable, clazz -> {
try {
return ReflectionUtils.getMethodInvoker(clazz,
jsonSerializable.getDeclaredMethod("fromJson", JsonReader.class));
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new IllegalStateException(e));
}
});
try (JsonReader jsonReader = JsonProviders.createReader(json)) {
return readJson.invokeStatic(jsonReader);
} catch (Throwable e) {
if (e instanceof IOException) {
throw (IOException) e;
} else if (e instanceof Exception) {
throw new IOException(e);
} else {
throw (Error) e;
}
}
}
/**
* Whether {@code XmlSerializable} is supported and the {@code bodyContentClass} is an instance of it.
*
* @param bodyContentClass The body content class.
* @return Whether {@code bodyContentClass} can be used as {@code XmlSerializable}.
*/
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@link ByteBuffer} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static ByteBuffer serializeXmlSerializableToByteBuffer(XmlSerializable<?> xmlSerializable)
throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, AccessibleByteArrayOutputStream::toByteBuffer);
}
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@code byte[]} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static byte[] serializeXmlSerializableToBytes(XmlSerializable<?> xmlSerializable) throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, AccessibleByteArrayOutputStream::toByteArray);
}
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@link String} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static String serializeXmlSerializableToString(XmlSerializable<?> xmlSerializable) throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, aos -> aos.toString(StandardCharsets.UTF_8));
}
private static <T> T serializeXmlSerializableWithReturn(XmlSerializable<?> xmlSerializable,
Function<AccessibleByteArrayOutputStream, T> returner) throws IOException {
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
XmlWriter xmlWriter = XmlWriter.toStream(outputStream)) {
xmlWriter.writeStartDocument();
xmlWriter.writeXml(xmlSerializable);
xmlWriter.flush();
return returner.apply(outputStream);
} catch (XMLStreamException ex) {
throw new IOException(ex);
}
}
/**
* Serializes the {@code xmlSerializable} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} content.
* @param outputStream Where the serialized {@code XmlSerializable} will be written.
* @throws IOException If an error occurs during serialization.
*/
public static void serializeXmlSerializableIntoOutputStream(XmlSerializable<?> xmlSerializable,
OutputStream outputStream) throws IOException {
try (XmlWriter xmlWriter = XmlWriter.toStream(outputStream)) {
xmlWriter.writeStartDocument();
xmlWriter.writeXml(xmlSerializable);
xmlWriter.flush();
} catch (XMLStreamException ex) {
throw new IOException(ex);
}
}
/**
* Deserializes the {@code xml} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} represented by the {@code xml}.
* @param xml The XML being deserialized.
* @return An instance of {@code xmlSerializable} based on the {@code xml}.
* @throws IOException If the XmlReader fails to close properly.
* @throws IllegalStateException If the {@code xmlSerializable} does not have a static {@code fromXml} method
* @throws Error If an error occurs during deserialization.
*/
public static Object deserializeAsXmlSerializable(Class<?> xmlSerializable, byte[] xml) throws IOException {
if (FROM_XML_CACHE.size() >= 10000) {
FROM_XML_CACHE.clear();
}
ReflectiveInvoker readXml = FROM_XML_CACHE.computeIfAbsent(xmlSerializable, clazz -> {
try {
return ReflectionUtils.getMethodInvoker(xmlSerializable,
xmlSerializable.getDeclaredMethod("fromXml", XmlReader.class));
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new IllegalStateException(e));
}
});
try (XmlReader xmlReader = XmlReader.fromBytes(xml)) {
return readXml.invokeStatic(xmlReader);
} catch (Throwable e) {
if (e instanceof IOException) {
throw (IOException) e;
} else if (e instanceof Exception) {
throw new IOException(e);
} else {
throw (Error) e;
}
}
}
private ReflectionSerializable() {
}
} | class ReflectionSerializable {
private static final ClientLogger LOGGER = new ClientLogger(ReflectionSerializable.class);
private static final Map<Class<?>, ReflectiveInvoker> FROM_JSON_CACHE;
private static final Map<Class<?>, ReflectiveInvoker> FROM_XML_CACHE;
static {
FROM_JSON_CACHE = new ConcurrentHashMap<>();
FROM_XML_CACHE = new ConcurrentHashMap<>();
}
/**
* Whether {@code JsonSerializable} is supported and the {@code bodyContentClass} is an instance of it.
*
* @param bodyContentClass The body content class.
* @return Whether {@code bodyContentClass} can be used as {@code JsonSerializable}.
*/
public static boolean supportsJsonSerializable(Class<?> bodyContentClass) {
if (FROM_JSON_CACHE.containsKey(bodyContentClass)) {
return true;
}
if (!JsonSerializable.class.isAssignableFrom(bodyContentClass)) {
return false;
}
boolean hasFromJson = false;
boolean hasToJson = false;
for (Method method : bodyContentClass.getDeclaredMethods()) {
if (method.getName().equals("fromJson")
&& (method.getModifiers() & Modifier.STATIC) != 0
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(JsonReader.class)) {
hasFromJson = true;
} else if (method.getName().equals("toJson")
&& method.getParameterCount() == 1
&& method.getParameterTypes()[0].equals(JsonWriter.class)) {
hasToJson = true;
}
if (hasFromJson && hasToJson) {
return true;
}
}
return false;
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} body content.
* @return The {@link ByteBuffer} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static ByteBuffer serializeJsonSerializableToByteBuffer(JsonSerializable<?> jsonSerializable)
throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, AccessibleByteArrayOutputStream::toByteBuffer);
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @return The {@code byte[]} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static byte[] serializeJsonSerializableToBytes(JsonSerializable<?> jsonSerializable) throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, AccessibleByteArrayOutputStream::toByteArray);
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @return The {@link String} representing the serialized {@code jsonSerializable}.
* @throws IOException If an error occurs during serialization.
*/
public static String serializeJsonSerializableToString(JsonSerializable<?> jsonSerializable) throws IOException {
return serializeJsonSerializableWithReturn(jsonSerializable, aos -> aos.toString(StandardCharsets.UTF_8));
}
private static <T> T serializeJsonSerializableWithReturn(JsonSerializable<?> jsonSerializable,
Function<AccessibleByteArrayOutputStream, T> returner) throws IOException {
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
jsonWriter.writeJson(jsonSerializable).flush();
return returner.apply(outputStream);
}
}
/**
* Serializes the {@code jsonSerializable} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} content.
* @param outputStream Where the serialized {@code JsonSerializable} will be written.
* @throws IOException If an error occurs during serialization.
*/
public static void serializeJsonSerializableIntoOutputStream(JsonSerializable<?> jsonSerializable,
OutputStream outputStream) throws IOException {
try (JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
jsonWriter.writeJson(jsonSerializable).flush();
}
}
/**
* Deserializes the {@code json} as an instance of {@code JsonSerializable}.
*
* @param jsonSerializable The {@code JsonSerializable} represented by the {@code json}.
* @param json The JSON being deserialized.
* @return An instance of {@code jsonSerializable} based on the {@code json}.
* @throws IOException If an error occurs during deserialization.
* @throws IllegalStateException If the {@code jsonSerializable} does not have a static {@code fromJson} method
* @throws Error If an error occurs during deserialization.
*/
public static Object deserializeAsJsonSerializable(Class<?> jsonSerializable, byte[] json) throws IOException {
if (FROM_JSON_CACHE.size() >= 10000) {
FROM_JSON_CACHE.clear();
}
ReflectiveInvoker readJson = FROM_JSON_CACHE.computeIfAbsent(jsonSerializable, clazz -> {
try {
return ReflectionUtils.getMethodInvoker(clazz,
jsonSerializable.getDeclaredMethod("fromJson", JsonReader.class));
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new IllegalStateException(e));
}
});
try (JsonReader jsonReader = JsonProviders.createReader(json)) {
return readJson.invokeStatic(jsonReader);
} catch (Throwable e) {
if (e instanceof IOException) {
throw (IOException) e;
} else if (e instanceof Exception) {
throw new IOException(e);
} else {
throw (Error) e;
}
}
}
/**
* Whether {@code XmlSerializable} is supported and the {@code bodyContentClass} is an instance of it.
*
* @param bodyContentClass The body content class.
* @return Whether {@code bodyContentClass} can be used as {@code XmlSerializable}.
*/
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@link ByteBuffer} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static ByteBuffer serializeXmlSerializableToByteBuffer(XmlSerializable<?> xmlSerializable)
throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, AccessibleByteArrayOutputStream::toByteBuffer);
}
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@code byte[]} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static byte[] serializeXmlSerializableToBytes(XmlSerializable<?> xmlSerializable) throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, AccessibleByteArrayOutputStream::toByteArray);
}
/**
* Serializes the {@code bodyContent} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} body content.
* @return The {@link String} representing the serialized {@code bodyContent}.
* @throws IOException If the XmlWriter fails to close properly.
*/
public static String serializeXmlSerializableToString(XmlSerializable<?> xmlSerializable) throws IOException {
return serializeXmlSerializableWithReturn(xmlSerializable, aos -> aos.toString(StandardCharsets.UTF_8));
}
private static <T> T serializeXmlSerializableWithReturn(XmlSerializable<?> xmlSerializable,
Function<AccessibleByteArrayOutputStream, T> returner) throws IOException {
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
XmlWriter xmlWriter = XmlWriter.toStream(outputStream)) {
xmlWriter.writeStartDocument();
xmlWriter.writeXml(xmlSerializable);
xmlWriter.flush();
return returner.apply(outputStream);
} catch (XMLStreamException ex) {
throw new IOException(ex);
}
}
/**
* Serializes the {@code xmlSerializable} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} content.
* @param outputStream Where the serialized {@code XmlSerializable} will be written.
* @throws IOException If an error occurs during serialization.
*/
public static void serializeXmlSerializableIntoOutputStream(XmlSerializable<?> xmlSerializable,
OutputStream outputStream) throws IOException {
try (XmlWriter xmlWriter = XmlWriter.toStream(outputStream)) {
xmlWriter.writeStartDocument();
xmlWriter.writeXml(xmlSerializable);
xmlWriter.flush();
} catch (XMLStreamException ex) {
throw new IOException(ex);
}
}
/**
* Deserializes the {@code xml} as an instance of {@code XmlSerializable}.
*
* @param xmlSerializable The {@code XmlSerializable} represented by the {@code xml}.
* @param xml The XML being deserialized.
* @return An instance of {@code xmlSerializable} based on the {@code xml}.
* @throws IOException If the XmlReader fails to close properly.
* @throws IllegalStateException If the {@code xmlSerializable} does not have a static {@code fromXml} method
* @throws Error If an error occurs during deserialization.
*/
public static Object deserializeAsXmlSerializable(Class<?> xmlSerializable, byte[] xml) throws IOException {
if (FROM_XML_CACHE.size() >= 10000) {
FROM_XML_CACHE.clear();
}
ReflectiveInvoker readXml = FROM_XML_CACHE.computeIfAbsent(xmlSerializable, clazz -> {
try {
return ReflectionUtils.getMethodInvoker(xmlSerializable,
xmlSerializable.getDeclaredMethod("fromXml", XmlReader.class));
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new IllegalStateException(e));
}
});
try (XmlReader xmlReader = XmlReader.fromBytes(xml)) {
return readXml.invokeStatic(xmlReader);
} catch (Throwable e) {
if (e instanceof IOException) {
throw (IOException) e;
} else if (e instanceof Exception) {
throw new IOException(e);
} else {
throw (Error) e;
}
}
}
private ReflectionSerializable() {
}
} |
should we have a check for the actual value also ? | public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
} | assertNotNull(createdKey.getProperties().getHsmPlatform()); | public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that a RSA key created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
final KeyType keyType;
if (runManagedHsmTest) {
keyType = KeyType.RSA_HSM;
} else {
keyType = KeyType.RSA;
}
StepVerifier.create(keyAsyncClient.createKey("", keyType))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that a key is not able to be updated when it is disabled. 403 error is expected.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertNotNull(retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
.verifyComplete();
StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithNewValue, response))
.verifyComplete();
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
})
.verifyComplete();
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
.assertNext(keyResponse -> {
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
})
.verifyComplete();
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
.verifyComplete();
byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse ->
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
.verifyComplete();
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepIfRunningAgainstService(60000);
StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
});
}
/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Skipped in live mode — presumably because deletion propagation makes the listing flaky; confirm.
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
// Give the service time to finish creating the keys before deleting them.
sleepIfRunningAgainstService(10000);
for (CreateKeyOptions key : keysToList.values()) {
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(key.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
}
// Deleted entries can take a while to appear in the listing.
sleepIfRunningAgainstService(90000);
// Every listed deleted key must carry soft-delete metadata; `last()` ensures the stream is non-empty.
StepVerifier.create(keyAsyncClient.listDeletedKeys()
.doOnNext(actualKey -> {
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
}).last())
.expectNextCount(1)
.verifyComplete();
});
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
// All entries share the same name, so each create call adds a new version of the same key.
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
// Allow the service to make all versions visible before listing them.
sleepIfRunningAgainstService(30000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
.assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
.verifyComplete();
});
}
/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(10000);
// Cross each listed key off the expected map; map() is used for its side effect here so the
// stream can still be驱 drained with last()/expectNextCount.
StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
return actualKey;
}).last())
.expectNextCount(1)
.verifyComplete();
// Every expected key must have been observed in the listing.
assertEquals(0, keysToList.size());
});
}
/**
 * Tests that an existing key can be released.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Key release is only exercised against Managed HSM and when explicitly enabled.
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyAsyncClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
.assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
.verifyComplete();
// In playback a canned token suffices; otherwise fetch a real token from the attestation service.
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
.assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
.expectComplete()
.verify();
});
}
/**
 * Verifies that fetching the key rotation policy of a key that does not exist fails with
 * an HTTP 404 (Not Found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);

    createKeyAsyncClient(httpClient, serviceVersion);

    String missingKeyName = testResourceNamer.randomName("nonExistentKey", 20);

    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(missingKeyName))
        .verifyErrorSatisfies(error -> assertRestException(error, ResourceNotFoundException.class,
            HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that fetching the key rotation policy of a key with no explicit policy returns the
 * service's default policy (a single NOTIFY action 30 days before expiry).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
.assertNext(keyRotationPolicy -> {
assertNotNull(keyRotationPolicy);
// Identity/timestamps are unset because no policy was ever written for this key.
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
// Default policy: one NOTIFY action at 30 days before expiry.
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Tests that an key rotation policy can be updated with all possible properties, then retrieves it.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
// Update the policy, then fetch it back and compare the two within one reactive chain.
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Tests that a key can be rotated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
// Create a key and immediately rotate it; the rotated key keeps the same name and tags.
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
.flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
keyAsyncClient.rotateKey(keyName))))
.assertNext(tuple -> {
KeyVaultKey createdKey = tuple.getT1();
KeyVaultKey rotatedKey = tuple.getT2();
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}).verifyComplete();
}
/**
 * Verifies that a {@link CryptographyAsyncClient} can be obtained from a {@link KeyAsyncClient}
 * for a given key name.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey"));
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
 * that cryptographic operations can be performed with said cryptography client.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete();
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
assertNotNull(cryptographyAsyncClient);
// Round-trip: encrypt with RSA-OAEP, decrypt the ciphertext, and expect the original plaintext.
// NOTE(review): getBytes() uses the platform default charset — fine for round-tripping, but
// consider StandardCharsets.UTF_8 for determinism.
byte[] plaintext = "myPlaintext".getBytes();
StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
.map(EncryptResult::getCipherText)
.flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
.map(DecryptResult::getPlainText)))
.assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
.verifyComplete();
});
}
/**
 * Verifies that a {@link CryptographyAsyncClient} can be obtained from a {@link KeyAsyncClient}
 * for a given key name and a specific key version.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    CryptographyAsyncClient cryptoClient =
        keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");

    assertNotNull(cryptoClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}
 * when an empty string is passed as the key version.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
assertNotNull(cryptographyAsyncClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}
 * when {@code null} is passed as the key version.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
assertNotNull(cryptographyAsyncClient);
}
/**
 * Blocks until the given deleted key can no longer be retrieved (i.e. the purge has completed),
 * giving up after 10 attempts spaced 2 seconds apart when running against the live service.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey stillDeleted;
        try {
            stillDeleted = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // The key is gone — exactly what we are waiting for.
            stillDeleted = null;
        }
        if (stillDeleted == null) {
            return;
        }
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
}
class KeyAsyncClientTest extends KeyClientTestBase {
// Client under test; re-created per test via createKeyAsyncClient(...).
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
// Delegates all per-test setup to the shared base-class helper.
beforeTestSetup();
}
// Convenience overload: builds the client with no explicit tenant ID.
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
// Builds the async client, substituting the recorded (playback) HTTP client when not running live.
// testTenantId may be null to use the default tenant.
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
 * Tests that a key can be created in the key vault.
 */
// NOTE(review): the body of the "createKey" test this Javadoc described is missing from the file,
// which left its @ParameterizedTest/@MethodSource annotations dangling in front of the next method.
// Duplicate @ParameterizedTest annotations do not compile (the annotation is not repeatable), so
// the stray pair has been removed; restore the missing test from source control if still needed.
/**
 * Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
 * provided in the authentication challenge.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Build the client with a random (wrong) tenant ID; the credential policy must recover from
    // the resulting authentication challenge.
    createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());

    createKeyRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete());

    // Clear the cached challenge so the second run re-authenticates from scratch.
    KeyVaultCredentialPolicy.clearCache();

    createKeyRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete());
}
/**
 * Tests that a RSA key created.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// The created key must echo back the requested options.
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
 * Verifies that creating a key with an empty name fails with an HTTP 400 (Bad Request).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    // Managed HSM requires the HSM-backed key type.
    final KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;

    StepVerifier.create(keyAsyncClient.createKey("", keyType))
        .verifyErrorSatisfies(error -> assertRestException(error, ResourceModifiedException.class,
            HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that creating a key without a key type fails with an HTTP 400 (Bad Request).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// The runner supplies options whose key type is unset; the service rejects the request.
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
 * Verifies that passing a null options object to createKey fails with a NullPointerException
 * before any request is sent.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.createKey(null)).verifyError(NullPointerException.class);
}
/**
 * Tests that a key is able to be updated when it exists.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Create the key, then update its expiry within the same reactive chain and verify the result.
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
 * Tests updating a disabled key's properties.
 * NOTE(review): the original Javadoc claimed "403 error is expected", but the test asserts a
 * successful update (verifyComplete with the updated key) — confirm the intended behavior.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
 * Tests that an existing key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
// "0" is the expected HSM-platform value here — presumably "not HSM-backed"; confirm.
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
// Fetching the key back must return the same material and properties.
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
}
/**
 * Tests that a specific version of the key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
// Create version one and fetch it back explicitly by its version identifier.
StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
.verifyComplete();
// Create version two and verify it is retrievable independently of version one.
StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithNewValue, response))
.verifyComplete();
});
}
/**
 * Verifies that retrieving a key that does not exist fails with an HTTP 404 (Not Found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.getKey("non-existing"))
        .verifyErrorSatisfies(error -> assertRestException(error, ResourceNotFoundException.class,
            HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that an existing key can be deleted.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
// Delete is a long-running operation; poll to completion (interval shortened in playback).
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
// The final poll response must carry the soft-delete metadata for the deleted key.
StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
})
.verifyComplete();
});
}
// Tests that attempting to delete a non-existing key fails with an HTTP 404 (Not Found).
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Verifies that retrieving a deleted key that does not exist fails with an HTTP 404 (Not Found)
 * on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
        .verifyErrorSatisfies(error -> assertRestException(error, ResourceNotFoundException.class,
            HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
.verifyComplete();
// Delete the key first so there is something to recover.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
// Recovery is also a long-running operation; the recovered key must match the original.
PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
.assertNext(keyResponse -> {
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
})
.verifyComplete();
});
}
/**
 * Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// The failure surfaces when subscribing to the poller flux.
StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be backed up in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
// The backup blob is opaque; we only assert it is present and non-empty.
StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
 * Verifies that backing up a key that does not exist fails with an HTTP 404 (Not Found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
        .verifyErrorSatisfies(error -> assertRestException(error, ResourceNotFoundException.class,
            HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be restored from its backup blob after the original has been deleted
 * and purged. (Original Javadoc said "backed up" — this test exercises the restore path.)
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
.verifyComplete();
// Take the backup before deleting; block() is acceptable inside this synchronous test runner.
byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
// Purge the soft-deleted key so the name is free for the restore.
StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse ->
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
.verifyComplete();
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Extra settle time before restoring against the live service.
sleepIfRunningAgainstService(60000);
StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
 * Tests that an attempt to restore a key from malformed backup bytes throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Arbitrary bytes that cannot be a well-formed backup blob.
byte[] keyBackupBytes = "non-existing".getBytes();
StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
.verifyComplete();
// Delete via the long-running poller (interval shortened in playback) before fetching.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
// The deleted-key record must carry soft-delete metadata.
StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
});
}
/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Skipped in live mode — presumably due to deletion-propagation timing; confirm.
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepIfRunningAgainstService(10000);
for (CreateKeyOptions key : keysToList.values()) {
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(key.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
}
// Deleted entries can take a while to appear in the listing.
sleepIfRunningAgainstService(90000);
StepVerifier.create(keyAsyncClient.listDeletedKeys()
.doOnNext(actualKey -> {
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
}).last())
.expectNextCount(1)
.verifyComplete();
});
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
// All entries share the same name, so each create adds a new version of the same key.
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(30000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
.assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
.verifyComplete();
});
}
/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(10000);
// Cross each listed key off the expected map as it is observed.
StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
return actualKey;
}).last())
.expectNextCount(1)
.verifyComplete();
// Every expected key must have been observed in the listing.
assertEquals(0, keysToList.size());
});
}
/**
 * Tests that an existing key can be released.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Key release is only exercised against Managed HSM and when explicitly enabled.
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyAsyncClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
.assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
.verifyComplete();
// In playback a canned token suffices; otherwise fetch a real one from the attestation service.
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
.assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
.expectComplete()
.verify();
});
}
/**
 * Tests that fetching the key rotation policy of a non-existent key throws.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation-policy tests run only against standard vaults, not Managed HSM.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that fetching the rotation policy of a key that has no explicit policy set returns the default policy.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation-policy tests are skipped when running against Managed HSM.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
// With no policy ever set, the returned policy carries no metadata and a single
// default lifetime action: NOTIFY 30 days ("P30D") before expiry.
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
.assertNext(keyRotationPolicy -> {
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}).verifyComplete();
}
/**
* Tests that a key rotation policy can be updated with minimum properties, then retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// NOTE(review): the @Disabled reason below appears truncated in this copy (URL cut off) — confirm against the repo.
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
// Rotation-policy tests are skipped when running against Managed HSM.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
// The policy returned by the update call must match the one subsequently fetched.
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that a key rotation policy can be updated with all possible properties, then retrieves it.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation-policy tests are skipped when running against Managed HSM.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
// The policy returned by the update call must match the one subsequently fetched.
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Tests that a key can be rotated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);

    String keyName = testResourceNamer.randomName("rotateKey", 20);

    // Create a key, rotate it, then compare the original and rotated versions.
    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
        .flatMap(originalKey -> Mono.zip(Mono.just(originalKey), keyAsyncClient.rotateKey(keyName))))
        .assertNext(keyPair -> {
            // The rotated key is expected to keep the original name and tags.
            assertEquals(keyPair.getT1().getName(), keyPair.getT2().getName());
            assertEquals(keyPair.getT1().getProperties().getTags(), keyPair.getT2().getProperties().getTags());
        }).verifyComplete();
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// NOTE(review): only client construction is checked; "myKey" is never created, so presumably
// no service call is made here — confirm.
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}. Also
* tests that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete();
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
assertNotNull(cryptographyAsyncClient);
byte[] plaintext = "myPlaintext".getBytes();
// Encrypt-then-decrypt round trip must recover the original plaintext.
StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
.map(EncryptResult::getCipherText)
.flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
.map(DecryptResult::getPlainText)))
.assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
.verifyComplete();
});
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key and version using a
* {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// NOTE(review): only client construction is checked; the key/version pair is not validated
// against the service here.
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key with an empty version using a
* {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key with a {@code null} version using a
* {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
assertNotNull(cryptographyAsyncClient);
}
// Waits for a deleted key to be fully purged, polling up to 10 times before giving up.
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;

        try {
            deletedKey = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // Not found means the key is already purged; deletedKey stays null.
        }

        if (deletedKey == null) {
            return;
        }

        sleepIfRunningAgainstService(2000);
    }

    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
Should we also assert the actual value, not just non-null? | public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertNotNull(retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
} | assertNotNull(createdKey.getProperties().getHsmPlatform()); | public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
} | class KeyAsyncClientTest extends KeyClientTestBase {
// Client under test; assigned per-test by createKeyAsyncClient(...).
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
// Delegates to the shared per-test setup in the base class.
beforeTestSetup();
}
// Convenience overload that creates the client without an explicit test tenant ID.
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
// Builds the KeyAsyncClient under test, substituting the playback HTTP client when recordings are replayed.
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
    HttpClient clientToUse = interceptorManager.isPlaybackMode()
        ? interceptorManager.getPlaybackClient()
        : httpClient;

    keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(clientToUse), testTenantId, getEndpoint(),
        serviceVersion)
        .buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
// Only presence of the HSM platform property is asserted; the exact value is not pinned.
assertNotNull(createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Use a random tenant ID that will not match the one in the service's auth challenge.
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
// Clear cached credential state so the second create must re-authenticate —
// NOTE(review): presumably this exercises the challenge-based auth path again; confirm.
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
 * Tests that an RSA key can be created in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    createRsaKeyRunner((keyToCreate) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
            .assertNext(createdKey -> assertKeyEquals(keyToCreate, createdKey))
            .verifyComplete();
    });
}
/**
 * Tests that creating a key with an empty name fails.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    // Use the HSM-backed key type when running against Managed HSM.
    final KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;

    // The service rejects an empty key name with a 400.
    StepVerifier.create(keyAsyncClient.createKey("", keyType))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that attempting to create a key without a key type fails with a 400.
* NOTE(review): the original doc said keys "can" be created here, but the runner expects a
* ResourceModifiedException — the failure expectation below matches the code.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Null input is rejected client-side with a NullPointerException.
StepVerifier.create(keyAsyncClient.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Create the key, then push an updated expiry through updateKeyProperties and verify the result.
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests updating a key that was created in a disabled state.
* NOTE(review): the previous doc claimed a 403 error was expected, but the test asserts a
* successful update — confirm which behavior is intended.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Two versions of the same key are created; each must be retrievable by its own version id.
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
.verifyComplete();
StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithNewValue, response))
.verifyComplete();
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Fetching a key that was never created should surface a 404.
StepVerifier.create(keyAsyncClient.getKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
// Deletion is a long-running operation; poll it to completion and inspect the final value.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
.assertNext(deletedKeyResponse -> {
// Soft-delete metadata must be populated on the deleted key.
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
})
.verifyComplete();
});
}
/**
* Tests that an attempt to delete a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
.verifyComplete();
// Delete the key and wait for the long-running operation to finish.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
// Recover the soft-deleted key and verify the restored key's attributes.
PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
.assertNext(keyResponse -> {
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
})
.verifyComplete();
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
// The backup blob is opaque; only non-emptiness is asserted.
StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be restored from a backup after it has been deleted and purged.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
.verifyComplete();
// NOTE(review): block() may return null on an empty Mono; restoreKeyBackup would then fail — confirm acceptable for a test.
byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
// Delete, then hard-purge the key so the restore below recreates it from scratch.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse ->
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
.verifyComplete();
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Give the service extra time after the purge before attempting the restore.
sleepIfRunningAgainstService(60000);
StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Arbitrary bytes are not a valid backup blob; the service should reject them with a 400.
byte[] keyBackupBytes = "non-existing".getBytes();
StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
.verifyComplete();
// Delete the key and wait for the long-running operation to finish.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
// Soft-delete metadata must be populated on the retrieved deleted key.
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Skipped in live mode; only record/playback runs exercise this listing.
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepIfRunningAgainstService(10000);
// Delete every created key, waiting on each long-running delete.
for (CreateKeyOptions key : keysToList.values()) {
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(key.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
}
// Allow the deletions to propagate before listing.
sleepIfRunningAgainstService(90000);
StepVerifier.create(keyAsyncClient.listDeletedKeys()
.doOnNext(actualKey -> {
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
}).last())
.expectNextCount(1)
.verifyComplete();
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
// All options share the same key name, so each create adds a new version of that key.
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(30000);
// The number of listed versions must match the number of creates above.
StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
.assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
.verifyComplete();
});
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(10000);
// Each expected key found in the listing is removed from the map; an empty map afterwards
// proves every created key appeared in the listing.
StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
return actualKey;
}).last())
.expectNextCount(1)
.verifyComplete();
assertEquals(0, keysToList.size());
});
}
/**
 * Verifies that an existing key can be released from the service.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Key release is only exercised when the Managed HSM + release-key test flags are on.
    Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
    createKeyAsyncClient(httpClient, serviceVersion);

    releaseKeyRunner((keyToRelease, attestationUrl) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
            .assertNext(createdKey -> assertKeyEquals(keyToRelease, createdKey))
            .verifyComplete();

        // In playback mode a fixed token is used; otherwise fetch one from the attestation service.
        String targetAttestationToken = "testAttestationToken";

        if (getTestMode() != TestMode.PLAYBACK) {
            String normalizedUrl = attestationUrl.endsWith("/") ? attestationUrl : attestationUrl + "/";

            targetAttestationToken = getAttestationToken(normalizedUrl + "generate-test-token");
        }

        StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
            .assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
            .expectComplete()
            .verify();
    });
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation-policy tests are skipped when running against Managed HSM.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
// A random, never-created key name should produce a 404 from the service.
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that fetching the rotation policy of a key that has no explicit policy set returns the default policy.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation-policy tests are skipped when running against Managed HSM.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
// With no policy ever set, the returned policy carries no metadata and a single
// default lifetime action: NOTIFY 30 days ("P30D") before expiry.
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
.assertNext(keyRotationPolicy -> {
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}).verifyComplete();
}
/**
* Tests that a key rotation policy can be updated with minimum properties, then retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// NOTE(review): the @Disabled reason below appears truncated in this copy (URL cut off) — confirm against the repo.
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
// Rotation-policy tests are skipped when running against Managed HSM.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
// The policy returned by the update call must match the one subsequently fetched.
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that a key rotation policy can be updated with all possible properties, then retrieves it.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation-policy tests are skipped when running against Managed HSM.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
// The policy returned by the update call must match the one subsequently fetched.
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that a key can be rotated.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
.flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
keyAsyncClient.rotateKey(keyName))))
.assertNext(tuple -> {
KeyVaultKey createdKey = tuple.getT1();
KeyVaultKey rotatedKey = tuple.getT2();
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}).verifyComplete();
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
* that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete();
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
assertNotNull(cryptographyAsyncClient);
byte[] plaintext = "myPlaintext".getBytes();
StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
.map(EncryptResult::getCipherText)
.flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
.map(DecryptResult::getPlainText)))
.assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
.verifyComplete();
});
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key and version using a
* {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
assertNotNull(cryptographyAsyncClient);
}
private void pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = keyAsyncClient.getDeletedKey(keyName).block();
} catch (ResourceNotFoundException ignored) {
}
if (deletedKey != null) {
sleepIfRunningAgainstService(2000);
pendingPollCount += 1;
} else {
return;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that a RSA key created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
final KeyType keyType;
if (runManagedHsmTest) {
keyType = KeyType.RSA_HSM;
} else {
keyType = KeyType.RSA;
}
StepVerifier.create(keyAsyncClient.createKey("", keyType))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that a key is not able to be updated when it is disabled. 403 error is expected.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
.verifyComplete();
StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithNewValue, response))
.verifyComplete();
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
})
.verifyComplete();
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
.assertNext(keyResponse -> {
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
})
.verifyComplete();
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
.verifyComplete();
byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse ->
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
.verifyComplete();
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepIfRunningAgainstService(60000);
StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepIfRunningAgainstService(10000);
for (CreateKeyOptions key : keysToList.values()) {
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(key.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
}
sleepIfRunningAgainstService(90000);
StepVerifier.create(keyAsyncClient.listDeletedKeys()
.doOnNext(actualKey -> {
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
}).last())
.expectNextCount(1)
.verifyComplete();
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(30000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
.assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
.verifyComplete();
});
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(10000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
return actualKey;
}).last())
.expectNextCount(1)
.verifyComplete();
assertEquals(0, keysToList.size());
});
}
/**
* Tests that an existing key can be released.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyAsyncClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
.assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
.verifyComplete();
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
.assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
.expectComplete()
.verify();
});
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
.assertNext(keyRotationPolicy -> {
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that an key rotation policy can be updated with all possible properties, then retrieves it.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that a key can be rotated.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
.flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
keyAsyncClient.rotateKey(keyName))))
.assertNext(tuple -> {
KeyVaultKey createdKey = tuple.getT1();
KeyVaultKey rotatedKey = tuple.getT2();
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}).verifyComplete();
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
* that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete();
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
assertNotNull(cryptographyAsyncClient);
byte[] plaintext = "myPlaintext".getBytes();
StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
.map(EncryptResult::getCipherText)
.flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
.map(DecryptResult::getPlainText)))
.assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
.verifyComplete();
});
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key and version using a
* {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    // A null version means "latest"; client construction alone must still succeed.
    CryptographyAsyncClient cryptoClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);

    assertNotNull(cryptoClient);
}
/**
 * Polls until the deleted key can no longer be retrieved (i.e. the purge completed), giving up
 * after ten attempts spaced two seconds apart and reporting the failure on stderr.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey stillDeleted;
        try {
            stillDeleted = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // A 404 means the key is fully purged.
            stillDeleted = null;
        }
        if (stillDeleted == null) {
            return;
        }
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
Should we also have a check for the actual value? | public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertNotNull(retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
} | assertNotNull(retrievedKey.getProperties().getHsmPlatform()); | public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    createKeyRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(createdKey -> {
                assertKeyEquals(keyToCreate, createdKey);
                // Check the concrete HSM platform value instead of mere non-nullness, for
                // consistency with the sibling createKey/getKey tests that expect "0".
                assertEquals("0", createdKey.getProperties().getHsmPlatform());
            })
            .verifyComplete());
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that a RSA key created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
final KeyType keyType;
if (runManagedHsmTest) {
keyType = KeyType.RSA_HSM;
} else {
keyType = KeyType.RSA;
}
StepVerifier.create(keyAsyncClient.createKey("", keyType))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests updating the properties of a disabled key; the update is expected to succeed (only key metadata changes).
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
.verifyComplete();
StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithNewValue, response))
.verifyComplete();
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    // Fetching a key that was never created must surface a 404 as ResourceNotFoundException.
    Mono<KeyVaultKey> missingKey = keyAsyncClient.getKey("non-existing");

    StepVerifier.create(missingKey)
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
})
.verifyComplete();
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    // Beginning deletion of a nonexistent key should fail with a 404.
    StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
.assertNext(keyResponse -> {
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
})
.verifyComplete();
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    restoreKeyRunner((keyToBackupAndRestore) -> {
        // Create the key that will be backed up, deleted, purged and finally restored.
        StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
            .verifyComplete();
        // Take the backup BEFORE deleting; blocking here is fine in a test.
        byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
        // Wait for the delete long-running operation to finish.
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        // Purge the deleted key; the service responds 204 No Content on success.
        StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
            .assertNext(voidResponse ->
                assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
            .verifyComplete();
        // Poll until the purge completes, then wait an extra minute (live runs only) before restoring;
        // presumably this allows service-side purge propagation — TODO confirm the required delay.
        pollOnKeyPurge(keyToBackupAndRestore.getName());
        sleepIfRunningAgainstService(60000);
        // Restoring from the backup must reproduce the original name and key properties.
        StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
            .assertNext(response -> {
                assertEquals(keyToBackupAndRestore.getName(), response.getName());
                assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
                assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
            }).verifyComplete();
    });
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepIfRunningAgainstService(10000);
for (CreateKeyOptions key : keysToList.values()) {
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(key.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
}
sleepIfRunningAgainstService(90000);
StepVerifier.create(keyAsyncClient.listDeletedKeys()
.doOnNext(actualKey -> {
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
}).last())
.expectNextCount(1)
.verifyComplete();
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(30000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
.assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
.verifyComplete();
});
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(10000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
return actualKey;
}).last())
.expectNextCount(1)
.verifyComplete();
assertEquals(0, keysToList.size());
});
}
/**
* Tests that an existing key can be released.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyAsyncClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
.assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
.verifyComplete();
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
.assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
.expectComplete()
.verify();
});
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
.assertNext(keyRotationPolicy -> {
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that an key rotation policy can be updated with all possible properties, then retrieves it.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that a key can be rotated.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
.flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
keyAsyncClient.rotateKey(keyName))))
.assertNext(tuple -> {
KeyVaultKey createdKey = tuple.getT1();
KeyVaultKey rotatedKey = tuple.getT2();
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}).verifyComplete();
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    // No service call is made here; client construction alone must succeed.
    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey"));
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
* that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete();
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
assertNotNull(cryptographyAsyncClient);
byte[] plaintext = "myPlaintext".getBytes();
StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
.map(EncryptResult::getCipherText)
.flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
.map(DecryptResult::getPlainText)))
.assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
.verifyComplete();
});
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key and version using a
* {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
assertNotNull(cryptographyAsyncClient);
}
/**
 * Polls until the deleted key can no longer be retrieved (i.e. the purge completed), giving up
 * after ten attempts spaced two seconds apart and reporting the failure on stderr.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey stillDeleted;
        try {
            stillDeleted = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // A 404 means the key is fully purged.
            stillDeleted = null;
        }
        if (stillDeleted == null) {
            return;
        }
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    createKeyRunner(options ->
        StepVerifier.create(keyAsyncClient.createKey(options))
            .assertNext(result -> {
                // The created key must mirror the request and report HSM platform "0".
                assertKeyEquals(options, result);
                assertEquals("0", result.getProperties().getHsmPlatform());
            })
            .verifyComplete());
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
 * Tests that an RSA key can be created in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    createRsaKeyRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete());
}
/**
 * Tests that we cannot create a key when the key name is an empty string; the service rejects it
 * with HTTP 400.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Ternary keeps this consistent with the synchronous twin (KeyClientTest.createKeyEmptyName).
    final KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;
    StepVerifier.create(keyAsyncClient.createKey("", keyType))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that creating a key with missing required values fails with HTTP 400.
 * NOTE(review): the previous Javadoc ("we can create keys when value is not null") contradicted
 * the assertion below, which expects a ResourceModifiedException.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    createKeyEmptyValueRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .verifyErrorSatisfies(e ->
                assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
 * Verifies that an exception is thrown when a null key object is passed for creation.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Client-side validation should reject null before any request is sent.
    StepVerifier.create(keyAsyncClient.createKey(null))
        .verifyError(NullPointerException.class);
}
/**
 * Tests that a key is able to be updated when it exists.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Create the key, then push an updated expiry through updateKeyProperties and verify it sticks.
    updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
        .flatMap(response -> {
            assertKeyEquals(originalKey, response);
            return keyAsyncClient.updateKeyProperties(response.getProperties()
                .setExpiresOn(updatedKey.getExpiresOn()));
        }))
        .assertNext(response -> assertKeyEquals(updatedKey, response))
        .verifyComplete());
}
/**
 * Tests that a disabled key can still have its properties (e.g. expiry) updated.
 * NOTE(review): the previous Javadoc claimed a 403 was expected, but the test asserts a
 * successful update — the doc was stale, the assertions are the source of truth.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
        .flatMap(response -> {
            assertKeyEquals(originalKey, response);
            return keyAsyncClient.updateKeyProperties(response.getProperties()
                .setExpiresOn(updatedKey.getExpiresOn()));
        }))
        .assertNext(response -> assertKeyEquals(updatedKey, response))
        .verifyComplete());
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
 * Tests that a specific version of the key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Create two versions under the same name, then fetch each by its explicit version string.
    getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
            keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
            keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithNewValue, response))
            .verifyComplete();
    });
}
/**
 * Tests that an attempt to get a non-existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Unknown key name should surface as an HTTP 404 ResourceNotFoundException.
    StepVerifier.create(keyAsyncClient.getKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that an existing key can be deleted.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    deleteKeyRunner((keyToDelete) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
            .assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
        // Deletion is a long-running operation; poll until it completes.
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
        StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
            .assertNext(deletedKeyResponse -> {
                // Soft-delete metadata should be populated on the deleted key.
                assertNotNull(deletedKeyResponse.getDeletedOn());
                assertNotNull(deletedKeyResponse.getRecoveryId());
                assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
                assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
            })
            .verifyComplete();
    });
}
/**
 * Tests that an attempt to delete a non-existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // No key was ever deleted under this name, so the lookup should 404.
    StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
        // Create, delete, then recover the key and verify its identity and timestamps survive.
        StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
            .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
            .verifyComplete();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
        StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
            .assertNext(keyResponse -> {
                assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
                assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
                assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
            })
            .verifyComplete();
    });
}
/**
 * Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Recovery of a key that was never deleted should 404.
    StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be backed up in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    backupKeyRunner((keyToBackup) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
        // Backup contents are opaque; only assert that a non-empty blob was produced.
        StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
            .assertNext(response -> {
                assertNotNull(response);
                assertTrue(response.length > 0);
            }).verifyComplete();
    });
}
/**
 * Tests that an attempt to backup a non existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be restored from a backup after the original key has been deleted and purged.
 * (The previous Javadoc incorrectly described this as a backup test.)
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    restoreKeyRunner((keyToBackupAndRestore) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
            .verifyComplete();
        byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
        // Fail fast with a clear signal if the backup call produced nothing.
        assertNotNull(backup);
        // Delete and purge the original key so the restore below starts from a clean slate.
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
            .assertNext(voidResponse ->
                assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
            .verifyComplete();
        pollOnKeyPurge(keyToBackupAndRestore.getName());
        // Purge propagation on the service side is eventually consistent.
        sleepIfRunningAgainstService(60000);
        StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
            .assertNext(response -> {
                assertEquals(keyToBackupAndRestore.getName(), response.getName());
                assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
                assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
            }).verifyComplete();
    });
}
/**
 * Tests that an attempt to restore a key from malformed backup bytes throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // NOTE(review): getBytes() uses the platform charset; harmless for this ASCII literal, but
    // StandardCharsets.UTF_8 would be the explicit choice.
    byte[] keyBackupBytes = "non-existing".getBytes();
    StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getDeletedKeyRunner((keyToDeleteAndGet) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
            .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
            .verifyComplete();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        // Retrieve the soft-deleted key and verify its recovery metadata is present.
        StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
            .assertNext(deletedKeyResponse -> {
                assertNotNull(deletedKeyResponse.getDeletedOn());
                assertNotNull(deletedKeyResponse.getRecoveryId());
                assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
                assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
            }).verifyComplete();
    });
}
/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Skipped in live mode — presumably too slow/flaky against the real service; TODO confirm.
    if (interceptorManager.isLiveMode()) {
        return;
    }
    listDeletedKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
        }
        sleepIfRunningAgainstService(10000);
        for (CreateKeyOptions key : keysToList.values()) {
            PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
                keyAsyncClient.beginDeleteKey(key.getName()));
            StepVerifier.create(poller.last())
                .expectNextCount(1)
                .verifyComplete();
        }
        // Give the service time to make deletions visible in the listing.
        sleepIfRunningAgainstService(90000);
        StepVerifier.create(keyAsyncClient.listDeletedKeys()
            .doOnNext(actualKey -> {
                assertNotNull(actualKey.getDeletedOn());
                assertNotNull(actualKey.getRecoveryId());
            }).last())
            .expectNextCount(1)
            .verifyComplete();
    });
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    listKeyVersionsRunner((keysToList) -> {
        String keyName = null;
        // Every option in the list shares one key name, creating one version per entry.
        for (CreateKeyOptions key : keysToList) {
            keyName = key.getName();
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
                .verifyComplete();
        }
        sleepIfRunningAgainstService(30000);
        StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
            .assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
            .verifyComplete();
    });
}
/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    listKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
                .verifyComplete();
        }
        sleepIfRunningAgainstService(10000);
        // NOTE(review): this map() mutates keysToList as a side effect — each expected key is
        // removed as it is seen, so an empty map afterwards means all keys were listed.
        StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
            if (keysToList.containsKey(actualKey.getName())) {
                CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
                assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
                assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
                keysToList.remove(actualKey.getName());
            }
            return actualKey;
        }).last())
            .expectNextCount(1)
            .verifyComplete();
        assertEquals(0, keysToList.size());
    });
}
/**
 * Tests that an existing key can be released.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Key release is only supported against Managed HSM, and only when explicitly enabled.
    Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
    createKeyAsyncClient(httpClient, serviceVersion);
    releaseKeyRunner((keyToRelease, attestationUrl) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
            .assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
            .verifyComplete();
        // In playback mode a fixed token matches the recorded session; live runs fetch a real one.
        String targetAttestationToken = "testAttestationToken";
        if (getTestMode() != TestMode.PLAYBACK) {
            if (!attestationUrl.endsWith("/")) {
                attestationUrl = attestationUrl + "/";
            }
            targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
        }
        StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
            .assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
            .expectComplete()
            .verify();
    });
}
/**
 * Tests that fetching the key rotation policy of a non-existent key throws.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation policy tests only run against standard vaults, not Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that fetching the rotation policy of a key with no explicit policy returns the service
 * default (a single NOTIFY action 30 days before expiry, with no id/timestamps set).
 * NOTE(review): the previous Javadoc was a stale copy of the non-existent-key test's doc.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
        .assertNext(keyRotationPolicy -> {
            assertNotNull(keyRotationPolicy);
            assertNull(keyRotationPolicy.getId());
            assertNull(keyRotationPolicy.getCreatedOn());
            assertNull(keyRotationPolicy.getUpdatedOn());
            assertNull(keyRotationPolicy.getExpiresIn());
            assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
            assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
            assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
            assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
        }).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Tests that a key rotation policy can be updated with all possible properties, then retrieves it.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
        // Update then re-fetch the policy and assert the round trip is lossless.
        StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
            .flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
                keyAsyncClient.getKeyRotationPolicy(keyName))))
            .assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
            .verifyComplete();
    });
}
/**
 * Tests that a key can be rotated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    // Rotation produces a new version; name and tags should carry over from the original.
    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
        .flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
            keyAsyncClient.rotateKey(keyName))))
        .assertNext(tuple -> {
            KeyVaultKey createdKey = tuple.getT1();
            KeyVaultKey rotatedKey = tuple.getT2();
            assertEquals(createdKey.getName(), rotatedKey.getName());
            assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
        }).verifyComplete();
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Client construction is local; no service call is made here, so the key need not exist.
    CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey");
    assertNotNull(cryptographyAsyncClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key using a
 * {@link KeyAsyncClient}, and that cryptographic operations can be performed with it.
 * (The previous Javadoc incorrectly referenced the synchronous client types.)
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    createKeyRunner((keyToCreate) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete();
        CryptographyAsyncClient cryptographyAsyncClient =
            keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
        assertNotNull(cryptographyAsyncClient);
        // Round-trip: encrypt then decrypt must reproduce the original plaintext.
        byte[] plaintext = "myPlaintext".getBytes();
        StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
            .map(EncryptResult::getCipherText)
            .flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
                .map(DecryptResult::getPlainText)))
            .assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
            .verifyComplete();
    });
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key and version using a
 * {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Construction only — the key/version pair need not exist on the service.
    CryptographyAsyncClient cryptographyAsyncClient =
        keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
    assertNotNull(cryptographyAsyncClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created using a {@link KeyAsyncClient}
 * when an empty key version is supplied.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
    assertNotNull(cryptographyAsyncClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created using a {@link KeyAsyncClient}
 * when a null key version is supplied.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
    assertNotNull(cryptographyAsyncClient);
}
// Polls until the deleted key has been fully purged (lookup no longer finds it), giving up after
// 10 attempts of ~2 seconds each and logging a warning instead of failing the test.
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;
        try {
            deletedKey = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // The deleted key is gone — exactly the state we are waiting for.
        }
        if (deletedKey == null) {
            return;
        }
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
Should we also assert the actual HSM platform value (e.g. assertEquals("0", ...)) rather than only checking it is non-null? | public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
KeyVaultKey createdKey = keyClient.createKey(keyToCreate);
assertKeyEquals(keyToCreate, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
});
} | assertNotNull(createdKey.getProperties().getHsmPlatform()); | public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
KeyVaultKey createdKey = keyClient.createKey(keyToCreate);
assertKeyEquals(keyToCreate, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
});
} | class KeyClientTest extends KeyClientTestBase {
protected KeyClient keyClient;
// Per-test setup hook: delegates to the shared base-class initialization.
@Override
protected void beforeTest() {
    beforeTestSetup();
}
// Convenience overload: builds the sync client without pinning a specific tenant ID.
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion, null);
}
// Builds the KeyClient under test, optionally pinned to a specific tenant ID.
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
    // In playback mode, substitute the interceptor's recorded client for the real HTTP client.
    HttpClient clientToUse = interceptorManager.isPlaybackMode()
        ? interceptorManager.getPlaybackClient()
        : httpClient;
    keyClient = getKeyClientBuilder(buildSyncAssertingClient(clientToUse), testTenantId, getEndpoint(),
        serviceVersion)
        .buildClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
 * Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
 * provided in the authentication challenge.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
    // Clear the cached credential so the second create re-authenticates against the challenge tenant.
    KeyVaultCredentialPolicy.clearCache();
    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
}
/**
 * Tests that an RSA key is created.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    createRsaKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createRsaKey(keyToCreate)));
}
/**
 * Tests that an attempt to create a key with empty string name throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // Managed HSM vaults require HSM-backed key types.
    KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;
    assertRestException(() -> keyClient.createKey("", keyType), ResourceModifiedException.class,
        HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
 * Tests that we cannot create keys when key type is null.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // The runner supplies options with a missing key type; the service should reject with 400.
    createKeyEmptyValueRunner((keyToCreate) ->
        assertRestException(() -> keyClient.createKey(keyToCreate.getName(), keyToCreate.getKeyType()),
            ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Verifies that an exception is thrown when null key object is passed for creation.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // Client-side validation should reject null before any request is sent.
    assertThrows(NullPointerException.class, () -> keyClient.createKey(null));
}
/**
 * Tests that a key is able to be updated when it exists.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    updateKeyRunner((originalKeyOptions, updatedKeyOptions) -> {
        KeyVaultKey createdKey = keyClient.createKey(originalKeyOptions);
        assertKeyEquals(originalKeyOptions, createdKey);
        // Push an updated expiry through updateKeyProperties and verify it sticks.
        KeyVaultKey updatedKey =
            keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updatedKeyOptions.getExpiresOn()));
        assertKeyEquals(updatedKeyOptions, updatedKey);
    });
}
/**
 * Tests that a key can be updated when it is disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    updateDisabledKeyRunner((createKeyOptions, updateKeyOptions) -> {
        KeyVaultKey createdKey = keyClient.createKey(createKeyOptions);
        assertKeyEquals(createKeyOptions, createdKey);
        // Property updates (e.g. expiry) are permitted even while the key is disabled.
        KeyVaultKey updatedKey =
            keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updateKeyOptions.getExpiresOn()));
        assertKeyEquals(updateKeyOptions, updatedKey);
    });
}
/**
 * Tests that an existing key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    getKeyRunner((keyToSetAndGet) -> {
        keyClient.createKey(keyToSetAndGet);
        KeyVaultKey retrievedKey = keyClient.getKey(keyToSetAndGet.getName());
        assertKeyEquals(keyToSetAndGet, retrievedKey);
        // Assert the actual platform value rather than mere presence: keys in a non-HSM vault
        // report HSM platform "0", consistent with the async createKey assertion.
        assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
    });
}
/**
 * Tests that a specific version of the key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // Create two versions under the same name, then fetch each by its explicit version string.
    getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
        KeyVaultKey keyVersionOne = keyClient.createKey(keyWithOriginalValue);
        KeyVaultKey keyVersionTwo = keyClient.createKey(keyWithNewValue);
        assertKeyEquals(keyWithOriginalValue,
            keyClient.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
        assertKeyEquals(keyWithNewValue,
            keyClient.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
    });
}
/**
 * Tests that an attempt to get a non-existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    assertRestException(() -> keyClient.getKey("non-existing"),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
sleepIfRunningAgainstService(30000);
assertKeyEquals(keyToDelete, keyClient.createKey(keyToDelete));
SyncPoller<DeletedKey, Void> deletedKeyPoller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDelete.getName()));
DeletedKey deletedKey = deletedKeyPoller.waitForCompletion().getValue();
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKey.getName());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginDeleteKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an attempt to retrieve a non-existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
assertKeyEquals(keyToDeleteAndRecover, keyClient.createKey(keyToDeleteAndRecover));
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
assertNotNull(poller.waitForCompletion());
SyncPoller<KeyVaultKey, Void> recoverPoller = setPlaybackSyncPollerPollInterval(
keyClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
KeyVaultKey recoveredKey = recoverPoller.waitForCompletion().getValue();
assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginRecoverDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Verifies that backing up an existing key yields a non-empty backup blob.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    backupKeyRunner((createOptions) -> {
        // Key must exist before it can be backed up.
        assertKeyEquals(createOptions, keyClient.createKey(createOptions));

        byte[] backup = keyClient.backupKey(createOptions.getName());

        assertNotNull(backup);
        assertTrue(backup.length > 0);
    });
}
/**
* Tests that an attempt to back up a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.backupKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be restored from a previously-taken backup blob.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
// Create the key and grab a backup blob of it.
assertKeyEquals(keyToBackupAndRestore, keyClient.createKey(keyToBackupAndRestore));
byte[] backupBytes = (keyClient.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
// Delete AND purge the key so the restore below targets a vault that no longer has it.
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToBackupAndRestore.getName()));
poller.waitForCompletion();
keyClient.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Extra settle time in live runs — presumably the service lags after a purge (TODO confirm).
sleepIfRunningAgainstService(60000);
KeyVaultKey restoredKey = keyClient.restoreKeyBackup(backupBytes);
// The restored key keeps its original name and expiry.
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> keyClient.restoreKeyBackup(keyBackupBytes),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
sleepIfRunningAgainstService(5000);
for (KeyProperties actualKey : keyClient.listPropertiesOfKeys()) {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
}
assertEquals(0, keysToList.size());
});
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, keyClient.createKey(keyToDeleteAndGet));
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndGet.getName()));
poller.waitForCompletion();
sleepIfRunningAgainstService(30000);
DeletedKey deletedKey = keyClient.getDeletedKey(keyToDeleteAndGet.getName());
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
for (CreateKeyOptions key : keysToList.values()) {
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(key.getName()));
poller.waitForCompletion();
}
sleepIfRunningAgainstService(90000);
Iterable<DeletedKey> deletedKeys = keyClient.listDeletedKeys();
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
sleepIfRunningAgainstService(4000);
assertKeyEquals(key, keyClient.createKey(key));
}
Iterable<KeyProperties> keyVersionsOutput = keyClient.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keysToList.size(), keyVersionsList.size());
});
}
/**
* Tests that an existing key can be released.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Key release is only exercised against Managed HSM, and is separately gated by a flag.
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
assertKeyEquals(keyToRelease, keyClient.createRsaKey(keyToRelease));
// PLAYBACK uses a fixed placeholder token (matches the recording); live/record runs
// fetch a real token from the attestation service's test endpoint.
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
ReleaseKeyResult releaseKeyResult = keyClient.releaseKey(keyToRelease.getName(), targetAttestationToken);
assertNotNull(releaseKeyResult.getValue());
});
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("nonExistentKey", 20);
assertThrows(ResourceNotFoundException.class, () -> keyClient.getKeyRotationPolicy(keyName));
}
/**
* Tests that fetching the rotation policy of a key that has no policy explicitly set returns
* the default policy: no id/timestamps/expiry and a single NOTIFY action 30 days before expiry.
* (Original Javadoc was a copy-paste of the non-existent-key test's description.)
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation policies are not exercised against Managed HSM in this test.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy keyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
// Default policy shape: unset metadata, exactly one lifetime action.
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
* Tests that an key rotation policy can be updated with all possible properties, then retrieves it.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
* Rotating an existing key must preserve its name and its tags.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation is not exercised against Managed HSM in this test.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyClient(httpClient, serviceVersion);

    String name = testResourceNamer.randomName("rotateKey", 20);
    KeyVaultKey original = keyClient.createRsaKey(new CreateRsaKeyOptions(name));
    KeyVaultKey rotated = keyClient.rotateKey(name);

    assertEquals(original.getName(), rotated.getName());
    assertEquals(original.getProperties().getTags(), rotated.getProperties().getTags());
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey");
assertNotNull(cryptographyClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
* that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate));
CryptographyClient cryptographyClient = keyClient.getCryptographyClient(keyToCreate.getName());
assertNotNull(cryptographyClient);
byte[] plaintext = "myPlaintext".getBytes();
byte[] ciphertext = cryptographyClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext).getCipherText();
byte[] decryptedText = cryptographyClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext).getPlainText();
assertArrayEquals(plaintext, decryptedText);
});
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient =
keyClient.getCryptographyClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
assertNotNull(cryptographyClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey", "");
assertNotNull(cryptographyClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey", null);
assertNotNull(cryptographyClient);
}
/**
* Waits for a deleted key's purge to complete: polls getDeletedKey up to 10 times,
* sleeping 2s between attempts (live runs only). Returns as soon as the key is gone;
* logs to stderr if it is still present after all attempts.
*/
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey stillDeleted = null;

        try {
            stillDeleted = keyClient.getDeletedKey(keyName);
        } catch (ResourceNotFoundException ignored) {
            // Not found means the purge finished; stillDeleted stays null.
        }

        if (stillDeleted == null) {
            return;
        }

        sleepIfRunningAgainstService(2000);
    }

    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
}
class KeyClientTest extends KeyClientTestBase {
// Synchronous client under test; (re)built per test by createKeyClient(...).
protected KeyClient keyClient;
@Override
protected void beforeTest() {
// Delegates per-test setup to the test base class.
beforeTestSetup();
}
// Convenience overload: no explicit tenant ID.
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion, null);
}
// Builds the sync KeyClient, substituting the recorded playback client when not running live.
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyClient = getKeyClientBuilder(buildSyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// NOTE(review): the method body for this test was missing — the two annotations above dangled
// directly in front of the next test's Javadoc, which is not valid Java. Reconstructed from
// the runner pattern used by every other create-key test in this class.
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Build the client with a random (mismatching) tenant ID; the credential must then
    // honor the tenant returned in the service's authentication challenge.
    createKeyClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
    // Clear the cached credential so the second create exercises a fresh challenge round-trip.
    KeyVaultCredentialPolicy.clearCache();
    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
}
/**
* Tests that an RSA key is created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createRsaKey(keyToCreate)));
}
/**
* Tests that an attempt to create a key with empty string name throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;
assertRestException(() -> keyClient.createKey("", keyType), ResourceModifiedException.class,
HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that we cannot create keys when key type is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
assertRestException(() -> keyClient.createKey(keyToCreate.getName(), keyToCreate.getKeyType()),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
// Client-side validation: a null options object is rejected with NullPointerException.
assertThrows(NullPointerException.class, () -> keyClient.createKey(null));
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
updateKeyRunner((originalKeyOptions, updatedKeyOptions) -> {
KeyVaultKey createdKey = keyClient.createKey(originalKeyOptions);
assertKeyEquals(originalKeyOptions, createdKey);
KeyVaultKey updatedKey =
keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updatedKeyOptions.getExpiresOn()));
assertKeyEquals(updatedKeyOptions, updatedKey);
});
}
/**
* Tests that a key can be updated when it is disabled.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
updateDisabledKeyRunner((createKeyOptions, updateKeyOptions) -> {
KeyVaultKey createdKey = keyClient.createKey(createKeyOptions);
assertKeyEquals(createKeyOptions, createdKey);
KeyVaultKey updatedKey =
keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updateKeyOptions.getExpiresOn()));
assertKeyEquals(updateKeyOptions, updatedKey);
});
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
keyClient.createKey(keyToSetAndGet);
KeyVaultKey retrievedKey = keyClient.getKey(keyToSetAndGet.getName());
assertKeyEquals(keyToSetAndGet, retrievedKey);
// NOTE(review): pins the HSM platform to the literal "0", a service-dependent value;
// the other copy of this test asserts only non-null — confirm which contract is intended.
assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
});
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
KeyVaultKey keyVersionOne = keyClient.createKey(keyWithOriginalValue);
KeyVaultKey keyVersionTwo = keyClient.createKey(keyWithNewValue);
assertKeyEquals(keyWithOriginalValue,
keyClient.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
assertKeyEquals(keyWithNewValue,
keyClient.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
sleepIfRunningAgainstService(30000);
assertKeyEquals(keyToDelete, keyClient.createKey(keyToDelete));
SyncPoller<DeletedKey, Void> deletedKeyPoller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDelete.getName()));
DeletedKey deletedKey = deletedKeyPoller.waitForCompletion().getValue();
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKey.getName());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginDeleteKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an attempt to retrieve a non-existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
assertKeyEquals(keyToDeleteAndRecover, keyClient.createKey(keyToDeleteAndRecover));
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
assertNotNull(poller.waitForCompletion());
SyncPoller<KeyVaultKey, Void> recoverPoller = setPlaybackSyncPollerPollInterval(
keyClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
KeyVaultKey recoveredKey = recoverPoller.waitForCompletion().getValue();
assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginRecoverDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
assertKeyEquals(keyToBackup, keyClient.createKey(keyToBackup));
byte[] backupBytes = (keyClient.backupKey(keyToBackup.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
});
}
/**
* Tests that an attempt to back up a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.backupKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
assertKeyEquals(keyToBackupAndRestore, keyClient.createKey(keyToBackupAndRestore));
byte[] backupBytes = (keyClient.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToBackupAndRestore.getName()));
poller.waitForCompletion();
keyClient.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepIfRunningAgainstService(60000);
KeyVaultKey restoredKey = keyClient.restoreKeyBackup(backupBytes);
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> keyClient.restoreKeyBackup(keyBackupBytes),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
// Create every key supplied by the runner before listing.
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
// Pause only when running against the live service.
sleepIfRunningAgainstService(5000);
for (KeyProperties actualKey : keyClient.listPropertiesOfKeys()) {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
// Remove matched entries so the final size check proves every created key was listed.
keysToList.remove(actualKey.getName());
}
}
assertEquals(0, keysToList.size());
});
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, keyClient.createKey(keyToDeleteAndGet));
// Wait for the delete operation to complete before fetching the deleted key.
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndGet.getName()));
poller.waitForCompletion();
sleepIfRunningAgainstService(30000);
DeletedKey deletedKey = keyClient.getDeletedKey(keyToDeleteAndGet.getName());
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
});
}
/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
// NOTE(review): skipped in live mode — presumably to avoid the long delete/propagation waits below; confirm.
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
// Delete all created keys, waiting for each delete poller to finish.
for (CreateKeyOptions key : keysToList.values()) {
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(key.getName()));
poller.waitForCompletion();
}
sleepIfRunningAgainstService(90000);
Iterable<DeletedKey> deletedKeys = keyClient.listDeletedKeys();
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
// Each createKey call adds a version; presumably all options share one key name — verify against the runner.
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
sleepIfRunningAgainstService(4000);
assertKeyEquals(key, keyClient.createKey(key));
}
Iterable<KeyProperties> keyVersionsOutput = keyClient.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keysToList.size(), keyVersionsList.size());
});
}
/**
 * Tests that an existing key can be released.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
assertKeyEquals(keyToRelease, keyClient.createRsaKey(keyToRelease));
// Playback mode uses a canned token; other modes fetch a real one from the attestation service.
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
ReleaseKeyResult releaseKeyResult = keyClient.releaseKey(keyToRelease.getName(), targetAttestationToken);
assertNotNull(releaseKeyResult.getValue());
});
}
/**
 * Tests that fetching the key rotation policy of a non-existent key throws.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("nonExistentKey", 20);
assertThrows(ResourceNotFoundException.class, () -> keyClient.getKeyRotationPolicy(keyName));
}
/**
 * Tests that fetching the key rotation policy of a key with no policy explicitly set returns the default policy.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy keyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
// The expected default: no id/timestamps, a single NOTIFY action 30 days before expiry.
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}
/**
 * Tests that a key rotation policy can be updated with minimum properties, then retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// NOTE(review): the @Disabled reason below appears truncated in this dump — restore the full issue URL.
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
// The policy read back must match the one returned by the update call.
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
 * Tests that a key rotation policy can be updated with all possible properties, then retrieves it.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
* Tests that a key can be rotated.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
/**
 * Verifies that rotating a key preserves its name and tags.
 */
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyClient(httpClient, serviceVersion);

    final String name = testResourceNamer.randomName("rotateKey", 20);
    final KeyVaultKey original = keyClient.createRsaKey(new CreateRsaKeyOptions(name));
    final KeyVaultKey rotated = keyClient.rotateKey(name);

    assertEquals(original.getName(), rotated.getName());
    assertEquals(original.getProperties().getTags(), rotated.getProperties().getTags());
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
 * Verifies that a {@link CryptographyClient} can be obtained from the {@link KeyClient} for a key name.
 */
public void getCryptographyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    // Client construction alone should succeed; no service call is asserted here.
    assertNotNull(keyClient.getCryptographyClient("myKey"));
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
* that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate));
CryptographyClient cryptographyClient = keyClient.getCryptographyClient(keyToCreate.getName());
assertNotNull(cryptographyClient);
// Round-trip: encrypting then decrypting with RSA-OAEP must reproduce the plaintext.
byte[] plaintext = "myPlaintext".getBytes();
byte[] ciphertext = cryptographyClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext).getCipherText();
byte[] decryptedText = cryptographyClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext).getPlainText();
assertArrayEquals(plaintext, decryptedText);
});
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
 * Verifies that a {@link CryptographyClient} can be obtained for an explicit key version.
 */
public void getCryptographyClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertNotNull(keyClient.getCryptographyClient("myKey", "6A385B124DEF4096AF1361A85B16C204"));
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
 * Verifies that an empty key version string still yields a non-null {@link CryptographyClient}.
 */
public void getCryptographyClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertNotNull(keyClient.getCryptographyClient("myKey", ""));
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
 * Verifies that a null key version still yields a non-null {@link CryptographyClient}.
 */
public void getCryptographyClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertNotNull(keyClient.getCryptographyClient("myKey", null));
}
/**
 * Polls until the deleted key is no longer retrievable (i.e. the purge took effect),
 * giving up after ten attempts and logging a warning to stderr.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;

        try {
            deletedKey = keyClient.getDeletedKey(keyName);
        } catch (ResourceNotFoundException ignored) {
            // Not found means the purge completed; deletedKey stays null.
        }

        if (deletedKey == null) {
            return;
        }

        sleepIfRunningAgainstService(2000);
    }

    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
/**
 * Tests that an existing key can be retrieved and that its properties — including
 * the reported HSM platform — match the expected values.
 *
 * @param httpClient the HTTP client to run the test with.
 * @param serviceVersion the Key Vault service version to run the test against.
 */
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    getKeyRunner((keyToSetAndGet) -> {
        keyClient.createKey(keyToSetAndGet);

        KeyVaultKey retrievedKey = keyClient.getKey(keyToSetAndGet.getName());

        assertKeyEquals(keyToSetAndGet, retrievedKey);
        // Per review: assert the actual value rather than only non-null. The service
        // reports hsmPlatform "0" for this scenario.
        assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
    });
}
class KeyClientTest extends KeyClientTestBase {
protected KeyClient keyClient;
@Override
protected void beforeTest() {
// Shared per-test setup from the base class.
beforeTestSetup();
}
// Convenience overload: builds the client without a test tenant ID.
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion, null);
}
// Builds the KeyClient under test, swapping in the playback HTTP client when recording is replayed.
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyClient = getKeyClientBuilder(buildSyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildClient();
}
/**
 * Tests that a key can be created in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
KeyVaultKey createdKey = keyClient.createKey(keyToCreate);
assertKeyEquals(keyToCreate, createdKey);
// NOTE(review): consider asserting the concrete hsmPlatform value (as getKey does) rather than only non-null.
assertNotNull(createdKey.getProperties().getHsmPlatform());
});
}
/**
 * Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
 * provided in the authentication challenge.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
// Clear cached credential state, then create again — presumably to exercise a fresh auth challenge; confirm.
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
}
/**
 * Tests that an RSA key is created.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createRsaKey(keyToCreate)));
}
/**
 * Tests that an attempt to create a key with empty string name throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
// Managed HSM vaults require HSM-backed key types.
KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;
assertRestException(() -> keyClient.createKey("", keyType), ResourceModifiedException.class,
HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
 * Tests that we cannot create keys when key type is null.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
assertRestException(() -> keyClient.createKey(keyToCreate.getName(), keyToCreate.getKeyType()),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Verifies that an exception is thrown when null key object is passed for creation.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertThrows(NullPointerException.class, () -> keyClient.createKey(null));
}
/**
 * Tests that a key is able to be updated when it exists.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
updateKeyRunner((originalKeyOptions, updatedKeyOptions) -> {
KeyVaultKey createdKey = keyClient.createKey(originalKeyOptions);
assertKeyEquals(originalKeyOptions, createdKey);
// Only the expiry is updated here; the rest of the properties carry over.
KeyVaultKey updatedKey =
keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updatedKeyOptions.getExpiresOn()));
assertKeyEquals(updatedKeyOptions, updatedKey);
});
}
/**
 * Tests that a key can be updated when it is disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
updateDisabledKeyRunner((createKeyOptions, updateKeyOptions) -> {
KeyVaultKey createdKey = keyClient.createKey(createKeyOptions);
assertKeyEquals(createKeyOptions, createdKey);
KeyVaultKey updatedKey =
keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updateKeyOptions.getExpiresOn()));
assertKeyEquals(updateKeyOptions, updatedKey);
});
}
/**
 * Tests that an existing key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// NOTE(review): the getKey test body is missing at this point in the dump; the Javadoc and
// annotations above are orphaned (the method was extracted as the row's target).
/**
 * Tests that a specific version of the key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
// Creating the same key twice produces two versions.
KeyVaultKey keyVersionOne = keyClient.createKey(keyWithOriginalValue);
KeyVaultKey keyVersionTwo = keyClient.createKey(keyWithNewValue);
assertKeyEquals(keyWithOriginalValue,
keyClient.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
assertKeyEquals(keyWithNewValue,
keyClient.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
});
}
/**
 * Tests that an attempt to get a non-existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that an existing key can be deleted.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
sleepIfRunningAgainstService(30000);
assertKeyEquals(keyToDelete, keyClient.createKey(keyToDelete));
SyncPoller<DeletedKey, Void> deletedKeyPoller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDelete.getName()));
DeletedKey deletedKey = deletedKeyPoller.waitForCompletion().getValue();
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKey.getName());
});
}
/**
 * Tests that attempting to delete a non-existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginDeleteKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that an attempt to retrieve a non-existing deleted key throws an error on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
assertKeyEquals(keyToDeleteAndRecover, keyClient.createKey(keyToDeleteAndRecover));
// Delete, wait for completion, then recover and verify the key's identity survived.
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
assertNotNull(poller.waitForCompletion());
SyncPoller<KeyVaultKey, Void> recoverPoller = setPlaybackSyncPollerPollInterval(
keyClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
KeyVaultKey recoveredKey = recoverPoller.waitForCompletion().getValue();
assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
});
}
/**
 * Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginRecoverDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that a key can be backed up in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
assertKeyEquals(keyToBackup, keyClient.createKey(keyToBackup));
byte[] backupBytes = (keyClient.backupKey(keyToBackup.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
});
}
/**
 * Tests that an attempt to back up a non-existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.backupKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that a key can be restored from a backup after being deleted and purged.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
assertKeyEquals(keyToBackupAndRestore, keyClient.createKey(keyToBackupAndRestore));
byte[] backupBytes = (keyClient.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
// Delete and purge the key so the restore does not collide with an existing entry.
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToBackupAndRestore.getName()));
poller.waitForCompletion();
keyClient.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Extra wait when live — presumably for the purge to propagate server-side; confirm.
sleepIfRunningAgainstService(60000);
KeyVaultKey restoredKey = keyClient.restoreKeyBackup(backupBytes);
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
 * Tests that an attempt to restore a key from malformed backup bytes throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> keyClient.restoreKeyBackup(keyBackupBytes),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
// Pause only when running against the live service.
sleepIfRunningAgainstService(5000);
for (KeyProperties actualKey : keyClient.listPropertiesOfKeys()) {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
// Remove matched entries so the final size check proves every created key was listed.
keysToList.remove(actualKey.getName());
}
}
assertEquals(0, keysToList.size());
});
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, keyClient.createKey(keyToDeleteAndGet));
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndGet.getName()));
poller.waitForCompletion();
sleepIfRunningAgainstService(30000);
DeletedKey deletedKey = keyClient.getDeletedKey(keyToDeleteAndGet.getName());
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
});
}
/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
// NOTE(review): skipped in live mode — presumably to avoid the long delete/propagation waits below; confirm.
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
for (CreateKeyOptions key : keysToList.values()) {
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(key.getName()));
poller.waitForCompletion();
}
sleepIfRunningAgainstService(90000);
Iterable<DeletedKey> deletedKeys = keyClient.listDeletedKeys();
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
// Each createKey call adds a version; presumably all options share one key name — verify against the runner.
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
sleepIfRunningAgainstService(4000);
assertKeyEquals(key, keyClient.createKey(key));
}
Iterable<KeyProperties> keyVersionsOutput = keyClient.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keysToList.size(), keyVersionsList.size());
});
}
/**
 * Tests that an existing key can be released.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
assertKeyEquals(keyToRelease, keyClient.createRsaKey(keyToRelease));
// Playback mode uses a canned token; other modes fetch a real one from the attestation service.
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
ReleaseKeyResult releaseKeyResult = keyClient.releaseKey(keyToRelease.getName(), targetAttestationToken);
assertNotNull(releaseKeyResult.getValue());
});
}
/**
 * Tests that fetching the key rotation policy of a non-existent key throws.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("nonExistentKey", 20);
assertThrows(ResourceNotFoundException.class, () -> keyClient.getKeyRotationPolicy(keyName));
}
/**
 * Tests that fetching the key rotation policy of a key with no policy explicitly set returns the default policy.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy keyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
// The expected default: no id/timestamps, a single NOTIFY action 30 days before expiry.
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}
/**
 * Tests that a key rotation policy can be updated with minimum properties, then retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// NOTE(review): the @Disabled reason below appears truncated in this dump — restore the full issue URL.
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
 * Tests that a key rotation policy can be updated with all possible properties, then retrieves it.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
 * Tests that a key can be rotated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
KeyVaultKey createdKey = keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyVaultKey rotatedKey = keyClient.rotateKey(keyName);
// Rotation produces a new version but keeps the key's identity and tags.
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey");
assertNotNull(cryptographyClient);
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
 * that cryptographic operations can be performed with said cryptography client.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate));
CryptographyClient cryptographyClient = keyClient.getCryptographyClient(keyToCreate.getName());
assertNotNull(cryptographyClient);
// Round-trip: encrypting then decrypting with RSA-OAEP must reproduce the plaintext.
byte[] plaintext = "myPlaintext".getBytes();
byte[] ciphertext = cryptographyClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext).getCipherText();
byte[] decryptedText = cryptographyClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext).getPlainText();
assertArrayEquals(plaintext, decryptedText);
});
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient =
keyClient.getCryptographyClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
assertNotNull(cryptographyClient);
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey", "");
assertNotNull(cryptographyClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey", null);
assertNotNull(cryptographyClient);
}
private void pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = keyClient.getDeletedKey(keyName);
} catch (ResourceNotFoundException ignored) {
}
if (deletedKey != null) {
sleepIfRunningAgainstService(2000);
pendingPollCount += 1;
} else {
return;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} | class KeyClientTest extends KeyClientTestBase {
protected KeyClient keyClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion, null);
}
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyClient = getKeyClientBuilder(buildSyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
KeyVaultKey createdKey = keyClient.createKey(keyToCreate);
assertKeyEquals(keyToCreate, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
});
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
}
/**
* Tests that an RSA key is created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createRsaKey(keyToCreate)));
}
/**
* Tests that an attempt to create a key with empty string name throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;
assertRestException(() -> keyClient.createKey("", keyType), ResourceModifiedException.class,
HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that we cannot create keys when key type is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
assertRestException(() -> keyClient.createKey(keyToCreate.getName(), keyToCreate.getKeyType()),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertThrows(NullPointerException.class, () -> keyClient.createKey(null));
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
updateKeyRunner((originalKeyOptions, updatedKeyOptions) -> {
KeyVaultKey createdKey = keyClient.createKey(originalKeyOptions);
assertKeyEquals(originalKeyOptions, createdKey);
KeyVaultKey updatedKey =
keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updatedKeyOptions.getExpiresOn()));
assertKeyEquals(updatedKeyOptions, updatedKey);
});
}
/**
* Tests that a key can be updated when it is disabled.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
updateDisabledKeyRunner((createKeyOptions, updateKeyOptions) -> {
KeyVaultKey createdKey = keyClient.createKey(createKeyOptions);
assertKeyEquals(createKeyOptions, createdKey);
KeyVaultKey updatedKey =
keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updateKeyOptions.getExpiresOn()));
assertKeyEquals(updateKeyOptions, updatedKey);
});
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
KeyVaultKey keyVersionOne = keyClient.createKey(keyWithOriginalValue);
KeyVaultKey keyVersionTwo = keyClient.createKey(keyWithNewValue);
assertKeyEquals(keyWithOriginalValue,
keyClient.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
assertKeyEquals(keyWithNewValue,
keyClient.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
sleepIfRunningAgainstService(30000);
assertKeyEquals(keyToDelete, keyClient.createKey(keyToDelete));
SyncPoller<DeletedKey, Void> deletedKeyPoller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDelete.getName()));
DeletedKey deletedKey = deletedKeyPoller.waitForCompletion().getValue();
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKey.getName());
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginDeleteKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an attempt to retrieve a non-existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
assertKeyEquals(keyToDeleteAndRecover, keyClient.createKey(keyToDeleteAndRecover));
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
assertNotNull(poller.waitForCompletion());
SyncPoller<KeyVaultKey, Void> recoverPoller = setPlaybackSyncPollerPollInterval(
keyClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
KeyVaultKey recoveredKey = recoverPoller.waitForCompletion().getValue();
assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginRecoverDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
assertKeyEquals(keyToBackup, keyClient.createKey(keyToBackup));
byte[] backupBytes = (keyClient.backupKey(keyToBackup.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
});
}
/**
* Tests that an attempt to back up a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.backupKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
assertKeyEquals(keyToBackupAndRestore, keyClient.createKey(keyToBackupAndRestore));
byte[] backupBytes = (keyClient.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToBackupAndRestore.getName()));
poller.waitForCompletion();
keyClient.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepIfRunningAgainstService(60000);
KeyVaultKey restoredKey = keyClient.restoreKeyBackup(backupBytes);
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> keyClient.restoreKeyBackup(keyBackupBytes),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
sleepIfRunningAgainstService(5000);
for (KeyProperties actualKey : keyClient.listPropertiesOfKeys()) {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
}
assertEquals(0, keysToList.size());
});
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, keyClient.createKey(keyToDeleteAndGet));
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndGet.getName()));
poller.waitForCompletion();
sleepIfRunningAgainstService(30000);
DeletedKey deletedKey = keyClient.getDeletedKey(keyToDeleteAndGet.getName());
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
for (CreateKeyOptions key : keysToList.values()) {
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(key.getName()));
poller.waitForCompletion();
}
sleepIfRunningAgainstService(90000);
Iterable<DeletedKey> deletedKeys = keyClient.listDeletedKeys();
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
sleepIfRunningAgainstService(4000);
assertKeyEquals(key, keyClient.createKey(key));
}
Iterable<KeyProperties> keyVersionsOutput = keyClient.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keysToList.size(), keyVersionsList.size());
});
}
/**
* Tests that an existing key can be released.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
assertKeyEquals(keyToRelease, keyClient.createRsaKey(keyToRelease));
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
ReleaseKeyResult releaseKeyResult = keyClient.releaseKey(keyToRelease.getName(), targetAttestationToken);
assertNotNull(releaseKeyResult.getValue());
});
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("nonExistentKey", 20);
assertThrows(ResourceNotFoundException.class, () -> keyClient.getKeyRotationPolicy(keyName));
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy keyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
* Tests that an key rotation policy can be updated with all possible properties, then retrieves it.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
* Tests that a key can be rotated.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
KeyVaultKey createdKey = keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyVaultKey rotatedKey = keyClient.rotateKey(keyName);
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey");
assertNotNull(cryptographyClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
* that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate));
CryptographyClient cryptographyClient = keyClient.getCryptographyClient(keyToCreate.getName());
assertNotNull(cryptographyClient);
byte[] plaintext = "myPlaintext".getBytes();
byte[] ciphertext = cryptographyClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext).getCipherText();
byte[] decryptedText = cryptographyClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext).getPlainText();
assertArrayEquals(plaintext, decryptedText);
});
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient =
keyClient.getCryptographyClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
assertNotNull(cryptographyClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey", "");
assertNotNull(cryptographyClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey", null);
assertNotNull(cryptographyClient);
}
private void pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = keyClient.getDeletedKey(keyName);
} catch (ResourceNotFoundException ignored) {
}
if (deletedKey != null) {
sleepIfRunningAgainstService(2000);
pendingPollCount += 1;
} else {
return;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
I just wanted to make sure we deserialize properly but checking the value itself should tell us if there's changes on the service-side of things. It should be `0` by default from what I've seen. | public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
} | assertNotNull(createdKey.getProperties().getHsmPlatform()); | public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that a RSA key created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
final KeyType keyType;
if (runManagedHsmTest) {
keyType = KeyType.RSA_HSM;
} else {
keyType = KeyType.RSA;
}
StepVerifier.create(keyAsyncClient.createKey("", keyType))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
 * Tests updating a disabled key. NOTE(review): the javadoc historically claimed a 403 is expected, but the
 * assertions below expect the update to succeed — the runner presumably enables/permits the update; confirm
 * against updateDisabledKeyRunner in KeyClientTestBase.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
        .flatMap(response -> {
            assertKeyEquals(originalKey, response);
            return keyAsyncClient.updateKeyProperties(response.getProperties()
                .setExpiresOn(updatedKey.getExpiresOn()));
        }))
        .assertNext(response -> assertKeyEquals(updatedKey, response))
        .verifyComplete());
}
/**
 * Tests that an existing key can be retrieved and that the HSM-platform property is populated on both the
 * create and get responses.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getKeyRunner((keyToSetAndGet) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
            .assertNext(createdKey -> {
                assertKeyEquals(keyToSetAndGet, createdKey);
                assertNotNull(createdKey.getProperties().getHsmPlatform());
            })
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
            .assertNext(retrievedKey -> {
                assertKeyEquals(keyToSetAndGet, retrievedKey);
                assertNotNull(retrievedKey.getProperties().getHsmPlatform());
            })
            .verifyComplete();
    });
}
/**
 * Tests that a specific version of a key can be retrieved by name + version after creating two versions.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
        // Each createKey call produces a new version; fetch each one back by its own version ID.
        StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
            keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
            keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithNewValue, response))
            .verifyComplete();
    });
}
/**
 * Tests that attempting to retrieve a non-existing key fails with HTTP 404.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.getKey("non-existing"))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that an existing key can be deleted and that the deleted-key metadata (deletion time, recovery ID,
 * scheduled purge date) is populated.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    deleteKeyRunner((keyToDelete) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
            .assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
        // Poll interval is shortened in playback mode so recorded sessions finish quickly.
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
        StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
            .assertNext(deletedKeyResponse -> {
                assertNotNull(deletedKeyResponse.getDeletedOn());
                assertNotNull(deletedKeyResponse.getRecoveryId());
                assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
                assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
            })
            .verifyComplete();
    });
}
/**
 * Tests that attempting to delete a non-existing key fails with HTTP 404.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that retrieving a non-existing deleted key fails with HTTP 404 on a soft-delete enabled vault.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault: create, delete (waiting for the
 * delete long-running operation), then recover and verify name/notBefore/expiry survive the round trip.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
            .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
            .verifyComplete();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
        // Drain the delete poller to completion before attempting recovery.
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
        StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
            .assertNext(keyResponse -> {
                assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
                assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
                assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
            })
            .verifyComplete();
    });
}
/**
 * Tests that recovering a non-existing deleted key fails with HTTP 404 on a soft-delete enabled vault.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be backed up, yielding a non-empty backup blob.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    backupKeyRunner((keyToBackup) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
        // The backup contents are opaque; only assert that something non-empty came back.
        StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
            .assertNext(response -> {
                assertNotNull(response);
                assertTrue(response.length > 0);
            }).verifyComplete();
    });
}
/**
 * Tests that backing up a non-existing key fails with HTTP 404.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be restored from a backup blob: create, back up, delete + purge, wait for the purge to
 * settle, then restore and verify name/notBefore/expiry are preserved.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    restoreKeyRunner((keyToBackupAndRestore) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
            .verifyComplete();
        // Blocking here is acceptable in test code; the backup bytes are needed for the restore below.
        byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
            .assertNext(voidResponse ->
                assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
            .verifyComplete();
        pollOnKeyPurge(keyToBackupAndRestore.getName());
        // Extra wait in live/record mode: a restore immediately after a purge can fail while it propagates.
        sleepIfRunningAgainstService(60000);
        StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
            .assertNext(response -> {
                assertEquals(keyToBackupAndRestore.getName(), response.getName());
                assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
                assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
            }).verifyComplete();
    });
}
/**
 * Tests that restoring a key from malformed backup bytes fails with HTTP 400.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Arbitrary bytes that are not a valid backup blob. NOTE(review): getBytes() uses the platform
    // default charset; harmless here since the literal is plain ASCII.
    byte[] malformedBackupBytes = "non-existing".getBytes();
    StepVerifier.create(keyAsyncClient.restoreKeyBackup(malformedBackupBytes))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault, with its deletion metadata set.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getDeletedKeyRunner((keyToDeleteAndGet) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
            .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
            .verifyComplete();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
            .assertNext(deletedKeyResponse -> {
                assertNotNull(deletedKeyResponse.getDeletedOn());
                assertNotNull(deletedKeyResponse.getRecoveryId());
                assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
                assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
            }).verifyComplete();
    });
}
/**
 * Tests that deleted keys can be listed. Skipped in live mode (early return) — presumably because other
 * concurrently-running tests pollute the deleted-keys list; confirm before changing.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    if (interceptorManager.isLiveMode()) {
        return;
    }
    listDeletedKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
        }
        // Fixed waits only apply against the real service; playback skips them.
        sleepIfRunningAgainstService(10000);
        for (CreateKeyOptions key : keysToList.values()) {
            PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
                keyAsyncClient.beginDeleteKey(key.getName()));
            StepVerifier.create(poller.last())
                .expectNextCount(1)
                .verifyComplete();
        }
        sleepIfRunningAgainstService(90000);
        // Every listed entry must expose deletion metadata; the list must be non-empty.
        StepVerifier.create(keyAsyncClient.listDeletedKeys()
            .doOnNext(actualKey -> {
                assertNotNull(actualKey.getDeletedOn());
                assertNotNull(actualKey.getRecoveryId());
            }).last())
            .expectNextCount(1)
            .verifyComplete();
    });
}
/**
 * Tests that all versions of a key can be listed: creates several versions under one name, then checks the
 * version count matches.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    listKeyVersionsRunner((keysToList) -> {
        String keyName = null;
        // All options in the list share one key name, so each create adds a version.
        for (CreateKeyOptions key : keysToList) {
            keyName = key.getName();
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
                .verifyComplete();
        }
        sleepIfRunningAgainstService(30000);
        StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
            .assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
            .verifyComplete();
    });
}
/**
 * Tests that keys can be listed: creates a batch of keys and checks each appears in the listing, removing
 * entries from the expected map as they are matched (the map must end up empty).
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    listKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
                .verifyComplete();
        }
        sleepIfRunningAgainstService(10000);
        // NOTE(review): the map() lambda mutates keysToList as a side effect; the final size check below
        // depends on it, so keep the mutation if restructuring this pipeline.
        StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
            if (keysToList.containsKey(actualKey.getName())) {
                CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
                assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
                assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
                keysToList.remove(actualKey.getName());
            }
            return actualKey;
        }).last())
            .expectNextCount(1)
            .verifyComplete();
        assertEquals(0, keysToList.size());
    });
}
/**
 * Tests that an exportable key can be released against an attestation token. Only runs for Managed HSM when
 * the release-key test flag is enabled.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
    createKeyAsyncClient(httpClient, serviceVersion);
    releaseKeyRunner((keyToRelease, attestationUrl) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
            .assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
            .verifyComplete();
        // Playback uses a canned token; live/record modes fetch a real one from the attestation service.
        String targetAttestationToken = "testAttestationToken";
        if (getTestMode() != TestMode.PLAYBACK) {
            if (!attestationUrl.endsWith("/")) {
                attestationUrl = attestationUrl + "/";
            }
            targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
        }
        StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
            .assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
            .expectComplete()
            .verify();
    });
}
/**
 * Tests that fetching the key rotation policy of a non-existent key fails with HTTP 404.
 * Skipped for HSM since rotation-policy tests target standard vaults here.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that fetching the rotation policy of a key with no policy set returns the service's default policy:
 * no ID/timestamps/expiry and a single NOTIFY lifetime action 30 days before expiry.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
        .assertNext(keyRotationPolicy -> {
            assertNotNull(keyRotationPolicy);
            assertNull(keyRotationPolicy.getId());
            assertNull(keyRotationPolicy.getCreatedOn());
            assertNull(keyRotationPolicy.getUpdatedOn());
            assertNull(keyRotationPolicy.getExpiresIn());
            assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
            assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
            // "P30D" is an ISO-8601 duration: notify 30 days before expiry.
            assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
            assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
        }).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Tests that a key rotation policy can be updated with all possible properties and then retrieved, with the
 * retrieved policy matching the updated one.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
            .flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
                keyAsyncClient.getKeyRotationPolicy(keyName))))
            .assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
            .verifyComplete();
    });
}
/**
 * Tests that a key can be rotated on demand, producing a new version with the same name and tags.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
        .flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
            keyAsyncClient.rotateKey(keyName))))
        .assertNext(tuple -> {
            KeyVaultKey createdKey = tuple.getT1();
            KeyVaultKey rotatedKey = tuple.getT2();
            assertEquals(createdKey.getName(), rotatedKey.getName());
            assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
        }).verifyComplete();
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be obtained for a given key name from a
 * {@link KeyAsyncClient}.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey"));
}
/**
 * Tests that a {@link CryptographyAsyncClient} obtained from a {@link KeyAsyncClient} can perform an
 * encrypt/decrypt round trip with the created key.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    createKeyRunner((keyToCreate) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete();
        CryptographyAsyncClient cryptographyAsyncClient =
            keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
        assertNotNull(cryptographyAsyncClient);
        byte[] plaintext = "myPlaintext".getBytes();
        // Encrypt then decrypt with RSA-OAEP; the round trip must reproduce the original plaintext.
        StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
            .map(EncryptResult::getCipherText)
            .flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
                .map(DecryptResult::getPlainText)))
            .assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
            .verifyComplete();
    });
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be obtained for a given key name and explicit version
 * from a {@link KeyAsyncClient}.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204"));
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be obtained from a {@link KeyAsyncClient} when the key
 * version is an empty string.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey", ""));
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be obtained from a {@link KeyAsyncClient} when the key
 * version is {@code null}.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey", null));
}
/**
 * Polls until a purged key is no longer retrievable as a deleted key, giving up after 10 attempts.
 * A {@link ResourceNotFoundException} from the service means the purge has completed.
 *
 * @param keyName name of the key whose purge is being awaited.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;
        try {
            deletedKey = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // Expected once the purge completes; deletedKey stays null and we return below.
        }
        if (deletedKey == null) {
            return;
        }
        // Still present — wait a bit (only against the live service) and retry.
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} | class KeyAsyncClientTest extends KeyClientTestBase {
// Async client under test; assigned by createKeyAsyncClient(...) at the start of each parameterized run.
protected KeyAsyncClient keyAsyncClient;
/**
 * Per-test setup hook; delegates to the shared setup in the test base class.
 */
@Override
protected void beforeTest() {
    beforeTestSetup();
}
/**
 * Builds the async key client for the given HTTP client and service version, with no tenant ID override.
 *
 * @param httpClient HTTP client to use (replaced by the playback client in playback mode).
 * @param serviceVersion Key Vault service API version to target.
 */
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion, null);
}
/**
 * Builds the async key client used by the tests and stores it in {@link #keyAsyncClient}.
 *
 * @param httpClient HTTP client to use when not in playback mode.
 * @param serviceVersion Key Vault service API version to target.
 * @param testTenantId optional tenant ID override for authentication-challenge tests; may be {@code null}.
 */
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
    // In playback mode the recorded HTTP client replaces the live one.
    HttpClient effectiveClient =
        interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient;
    keyAsyncClient =
        getKeyClientBuilder(buildAsyncAssertingClient(effectiveClient), testTenantId, getEndpoint(), serviceVersion)
            .buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
 * Tests that a key can be created while using a different tenant ID than the one that will be provided in
 * the authentication challenge; the credential cache is cleared mid-test to force re-authentication.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
    createKeyRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete());
    // Force a fresh authentication challenge, then create again to prove re-auth works across tenants.
    KeyVaultCredentialPolicy.clearCache();
    createKeyRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete());
}
/**
 * Tests that an RSA key can be created in the key vault.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    createRsaKeyRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete());
}
/**
 * Tests that creating a key with an empty name fails with HTTP 400.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Managed HSM only supports HSM-backed key types.
    final KeyType keyType;
    if (runManagedHsmTest) {
        keyType = KeyType.RSA_HSM;
    } else {
        keyType = KeyType.RSA;
    }
    StepVerifier.create(keyAsyncClient.createKey("", keyType))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that key creation fails with HTTP 400 when the create options carry no usable key type/value
 * (see createKeyEmptyValueRunner in the test base class).
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    createKeyEmptyValueRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .verifyErrorSatisfies(e ->
                assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
 * Verifies that attempting to create a key from a {@code null} options object fails client-side with a
 * {@link NullPointerException}.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.createKey(null))
        .verifyError(NullPointerException.class);
}
/**
 * Tests that an existing key's properties (expiry) can be updated.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
        .flatMap(response -> {
            assertKeyEquals(originalKey, response);
            return keyAsyncClient.updateKeyProperties(response.getProperties()
                .setExpiresOn(updatedKey.getExpiresOn()));
        }))
        .assertNext(response -> assertKeyEquals(updatedKey, response))
        .verifyComplete());
}
/**
 * Tests updating a disabled key. NOTE(review): the javadoc historically claimed a 403 is expected, but the
 * assertions below expect the update to succeed — confirm against updateDisabledKeyRunner in the base class.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
        .flatMap(response -> {
            assertKeyEquals(originalKey, response);
            return keyAsyncClient.updateKeyProperties(response.getProperties()
                .setExpiresOn(updatedKey.getExpiresOn()));
        }))
        .assertNext(response -> assertKeyEquals(updatedKey, response))
        .verifyComplete());
}
/**
 * Tests that an existing key can be retrieved.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getKeyRunner((keyToSetAndGet) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
            .assertNext(createdKey -> {
                assertKeyEquals(keyToSetAndGet, createdKey);
                // NOTE(review): "0" is asserted as the expected HSM-platform value; presumably the
                // service-defined value for non-HSM vaults — confirm before changing.
                assertEquals("0", createdKey.getProperties().getHsmPlatform());
            })
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
            .assertNext(retrievedKey -> {
                assertKeyEquals(keyToSetAndGet, retrievedKey);
                assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
            })
            .verifyComplete();
    });
}
/**
 * Tests that a specific version of a key can be retrieved by name + version after creating two versions.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
            keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
            keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithNewValue, response))
            .verifyComplete();
    });
}
/**
 * Tests that attempting to retrieve a non-existing key fails with HTTP 404.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.getKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that an existing key can be deleted and that the deleted-key metadata is populated.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    deleteKeyRunner((keyToDelete) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
            .assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
        StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
            .assertNext(deletedKeyResponse -> {
                assertNotNull(deletedKeyResponse.getDeletedOn());
                assertNotNull(deletedKeyResponse.getRecoveryId());
                assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
                assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
            })
            .verifyComplete();
    });
}
/**
 * Tests that attempting to delete a non-existing key fails with HTTP 404.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that retrieving a non-existing deleted key fails with HTTP 404 on a soft-delete enabled vault.
 *
 * @param httpClient HTTP client to run the test with (wrapped for record/playback).
 * @param serviceVersion Key Vault service API version under test.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault and that the
 * recovered key keeps its original attributes (notBefore/expiresOn).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
            .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
            .verifyComplete();
        // Delete first; the key must be in the deleted state before recovery is possible.
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
        StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
            .assertNext(keyResponse -> {
                assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
                assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
                assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
            })
            .verifyComplete();
    });
}
/**
 * Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be backed up in the key vault and that the backup blob is non-empty.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    backupKeyRunner((keyToBackup) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
        // The backup payload is opaque; only its presence and non-zero length can be asserted.
        StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
            .assertNext(response -> {
                assertNotNull(response);
                assertTrue(response.length > 0);
            }).verifyComplete();
    });
}
/**
 * Tests that an attempt to backup a non existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be restored into the key vault from a previously taken backup.
 * The key is created, backed up, deleted, purged, and then restored from the backup blob.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    restoreKeyRunner((keyToBackupAndRestore) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
            .verifyComplete();
        byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        // The key must be fully purged before a restore with the same name is accepted.
        StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
            .assertNext(voidResponse ->
                assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
            .verifyComplete();
        pollOnKeyPurge(keyToBackupAndRestore.getName());
        // Extra settle time when hitting the live service; no-op in playback.
        sleepIfRunningAgainstService(60000);
        StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
            .assertNext(response -> {
                assertEquals(keyToBackupAndRestore.getName(), response.getName());
                assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
                assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
            }).verifyComplete();
    });
}
/**
 * Tests that an attempt to restore a key from malformed backup bytes throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Arbitrary bytes that are not a valid backup blob; the service rejects them with 400.
    byte[] keyBackupBytes = "non-existing".getBytes();
    StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getDeletedKeyRunner((keyToDeleteAndGet) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
            .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
            .verifyComplete();
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();
        // The deleted key must be directly retrievable and carry soft-delete metadata.
        StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
            .assertNext(deletedKeyResponse -> {
                assertNotNull(deletedKeyResponse.getDeletedOn());
                assertNotNull(deletedKeyResponse.getRecoveryId());
                assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
                assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
            }).verifyComplete();
    });
}
/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Skipped in live mode — presumably because the long waits below make it impractical
    // against the real service; runs in playback/record. TODO confirm rationale.
    if (interceptorManager.isLiveMode()) {
        return;
    }
    listDeletedKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
        }
        sleepIfRunningAgainstService(10000);
        for (CreateKeyOptions key : keysToList.values()) {
            PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
                keyAsyncClient.beginDeleteKey(key.getName()));
            StepVerifier.create(poller.last())
                .expectNextCount(1)
                .verifyComplete();
        }
        // Allow the service time to reflect all deletions in the list results.
        sleepIfRunningAgainstService(90000);
        StepVerifier.create(keyAsyncClient.listDeletedKeys()
            .doOnNext(actualKey -> {
                assertNotNull(actualKey.getDeletedOn());
                assertNotNull(actualKey.getRecoveryId());
            }).last())
            .expectNextCount(1)
            .verifyComplete();
    });
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    listKeyVersionsRunner((keysToList) -> {
        String keyName = null;
        // All options in the list share one key name; each create adds a new version of it.
        for (CreateKeyOptions key : keysToList) {
            keyName = key.getName();
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
                .verifyComplete();
        }
        sleepIfRunningAgainstService(30000);
        StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
            .assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
            .verifyComplete();
    });
}
/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    listKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
                .verifyComplete();
        }
        sleepIfRunningAgainstService(10000);
        // Each expected key is removed from the map as it is observed; an empty map at the
        // end proves every created key appeared in the listing.
        StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
            if (keysToList.containsKey(actualKey.getName())) {
                CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
                assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
                assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
                keysToList.remove(actualKey.getName());
            }
            return actualKey;
        }).last())
            .expectNextCount(1)
            .verifyComplete();
        assertEquals(0, keysToList.size());
    });
}
/**
 * Tests that an existing key can be released. Only runs when both the Managed HSM and
 * release-key test flags are enabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
    createKeyAsyncClient(httpClient, serviceVersion);
    releaseKeyRunner((keyToRelease, attestationUrl) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
            .assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
            .verifyComplete();
        // In playback a fixed token matches the recording; otherwise fetch a real token
        // from the attestation service.
        String targetAttestationToken = "testAttestationToken";
        if (getTestMode() != TestMode.PLAYBACK) {
            if (!attestationUrl.endsWith("/")) {
                attestationUrl = attestationUrl + "/";
            }
            targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
        }
        StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
            .assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
            .expectComplete()
            .verify();
    });
}
/**
 * Tests that fetching the key rotation policy of a non-existent key throws.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation policies are not supported on Managed HSM in this suite.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that fetching the rotation policy of a key that never had one set returns the
 * service's default policy (no id/timestamps, a single NOTIFY action at P30D).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
        .assertNext(keyRotationPolicy -> {
            assertNotNull(keyRotationPolicy);
            assertNull(keyRotationPolicy.getId());
            assertNull(keyRotationPolicy.getCreatedOn());
            assertNull(keyRotationPolicy.getUpdatedOn());
            assertNull(keyRotationPolicy.getExpiresIn());
            assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
            assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
            assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
            assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
        }).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Tests that a key rotation policy can be updated with all possible properties, then retrieves it.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
        // Update the policy, then fetch it again and verify both copies agree.
        StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
            .flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
                keyAsyncClient.getKeyRotationPolicy(keyName))))
            .assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
            .verifyComplete();
    });
}
/**
 * Tests that a key can be rotated and that the rotated key keeps the same name and tags.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    // Create the key and rotate it in one reactive chain, keeping both results for comparison.
    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
        .flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
            keyAsyncClient.rotateKey(keyName))))
        .assertNext(tuple -> {
            KeyVaultKey createdKey = tuple.getT1();
            KeyVaultKey rotatedKey = tuple.getT2();
            assertEquals(createdKey.getName(), rotatedKey.getName());
            assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
        }).verifyComplete();
}
/**
 * Verifies that a {@link KeyAsyncClient} can hand out a {@link CryptographyAsyncClient}
 * scoped to a key name.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Only client construction is exercised here; the test asserts nothing beyond non-null.
    final CryptographyAsyncClient cryptoClient = keyAsyncClient.getCryptographyAsyncClient("myKey");
    assertNotNull(cryptoClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key using a
 * {@link KeyAsyncClient}. Also tests that cryptographic operations (encrypt/decrypt
 * round-trip) can be performed with said cryptography client.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    createKeyRunner((keyToCreate) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete();
        CryptographyAsyncClient cryptographyAsyncClient =
            keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
        assertNotNull(cryptographyAsyncClient);
        // Round-trip: encrypt then decrypt must reproduce the original plaintext.
        byte[] plaintext = "myPlaintext".getBytes();
        StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
            .map(EncryptResult::getCipherText)
            .flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
                .map(DecryptResult::getPlainText)))
            .assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
            .verifyComplete();
    });
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key and version using a
 * {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    CryptographyAsyncClient cryptographyAsyncClient =
        keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
    assertNotNull(cryptographyAsyncClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key using a
 * {@link KeyAsyncClient} when an empty key version is provided.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
    assertNotNull(cryptographyAsyncClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key using a
 * {@link KeyAsyncClient} when a null key version is provided.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
    assertNotNull(cryptographyAsyncClient);
}
/**
 * Polls until the given deleted key has been fully purged, giving up after ten attempts.
 * Each attempt fetches the deleted key; a ResourceNotFoundException (or null result) means
 * the purge completed. If the key is still present after all attempts, a warning is printed.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;
        try {
            deletedKey = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // Not found: the key has been purged.
        }
        if (deletedKey == null) {
            return;
        }
        // Still present — back off briefly before the next probe (no-op in playback).
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
Adding the operation name in the exception message could help troubleshooting. `RequestOptions.NONE is immutable. Cannot add query params`. We should update the message in other places too. | public RequestOptions addQueryParam(String parameterName, String value) {
if (locked) {
throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
}
return addQueryParam(parameterName, value, false);
} | throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable.")); | public RequestOptions addQueryParam(String parameterName, String value) {
return addQueryParam(parameterName, value, false);
} | class RequestOptions {
// Shared immutable sentinel: locked so any mutator throws instead of silently mutating it.
public static final RequestOptions NONE = new RequestOptions()
    .setContext(Context.EMPTY)
    .lock();
private static final ClientLogger LOGGER = new ClientLogger(RequestOptions.class);
// Accumulates all request mutations; applied in registration order when the request is built.
private Consumer<HttpRequest> requestCallback = request -> {
};
private Context context;
private ResponseBodyMode responseBodyMode;
// True only for NONE; guards every mutator.
private boolean locked;
/**
 * Creates a new instance of {@link RequestOptions}.
 */
public RequestOptions() {
}
/**
 * Gets the request callback, applying all the configurations set on this instance of {@link RequestOptions}.
 *
 * @return The request callback.
 */
public Consumer<HttpRequest> getRequestCallback() {
    return this.requestCallback;
}
/**
 * Gets the additional context on the request that is passed during the service call.
 *
 * @return The additional context that is passed during the service call.
 */
public Context getContext() {
    return context;
}
/**
 * Gets the configuration indicating how the body of the resulting HTTP response should be handled.
 *
 * <p>For more information about the options for handling an HTTP response body, see {@link ResponseBodyMode}.</p>
 *
 * @return The configuration indicating how the body of the resulting HTTP response should be handled.
 */
public ResponseBodyMode getResponseBodyMode() {
    return responseBodyMode;
}
/**
 * Adds a header to the {@link HttpRequest}.
 *
 * <p>If a header with the given name exists, the {@code value} is added to the existing header (comma-separated),
 * otherwise a new header will be created.</p>
 *
 * @param header The header to add.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException If this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions addHeader(HttpHeader header) {
    if (locked) {
        // Name the rejected operation so the failure is actionable from the message alone.
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot add header."));
    }
    this.requestCallback = this.requestCallback.andThen(request -> request.getHeaders().add(header));
    return this;
}
/**
 * Sets a header on the {@link HttpRequest}.
 *
 * <p>If a header with the given name exists it is overridden by the new {@code value}.</p>
 *
 * @param header The header key.
 * @param value The header value.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException If this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions setHeader(HttpHeaderName header, String value) {
    if (locked) {
        // Name the rejected operation so the failure is actionable from the message alone.
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot set header."));
    }
    this.requestCallback = this.requestCallback.andThen(request -> request.getHeaders().set(header, value));
    return this;
}
/**
 * Adds a query parameter to the request URL. The parameter name and value will be URL encoded. To use an already
 * encoded parameter name and value, call {@code addQueryParam("name", "value", true)}.
 *
 * @param parameterName The name of the query parameter.
 * @param value The value of the query parameter.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException If this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions addQueryParam(String parameterName, String value) {
    // The javadoc above documented this overload but the method itself was missing;
    // delegate to the encoding-aware overload (which performs the locked check).
    return addQueryParam(parameterName, value, false);
}
/**
 * Adds a query parameter to the request URL, specifying whether the parameter is already encoded. A value of
 * {@code true} for {@code encoded} indicates the name and value are already encoded and the engine should not
 * encode them again. By default, the value will be encoded.
 *
 * @param parameterName The name of the query parameter.
 * @param value The value of the query parameter.
 * @param encoded Whether this query parameter is already encoded.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException If this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions addQueryParam(String parameterName, String value, boolean encoded) {
    if (locked) {
        // Name the rejected operation so the failure is actionable from the message alone.
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot add query param."));
    }
    this.requestCallback = this.requestCallback.andThen(request -> {
        String url = request.getUrl().toString();
        String encodedParameterName = encoded ? parameterName : UrlEscapers.QUERY_ESCAPER.escape(parameterName);
        String encodedParameterValue = encoded ? value : UrlEscapers.QUERY_ESCAPER.escape(value);
        request.setUrl(url + (url.contains("?") ? "&" : "?") + encodedParameterName + "=" + encodedParameterValue);
    });
    return this;
}
/**
 * Adds a custom request callback to modify the {@link HttpRequest} before it's sent by the {@link HttpClient}. The
 * modifications made on a {@link RequestOptions} object are applied in order on the request.
 *
 * @param requestCallback The request callback.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws NullPointerException If {@code requestCallback} is {@code null}.
 * @throws IllegalStateException If this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions addRequestCallback(Consumer<HttpRequest> requestCallback) {
    if (locked) {
        // Name the rejected operation so the failure is actionable from the message alone.
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot add request callback."));
    }
    Objects.requireNonNull(requestCallback, "'requestCallback' cannot be null.");
    this.requestCallback = this.requestCallback.andThen(requestCallback);
    return this;
}
/**
 * Sets the body to send as part of the {@link HttpRequest}.
 *
 * @param requestBody the request body data
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws NullPointerException If {@code requestBody} is {@code null}.
 * @throws IllegalStateException If this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions setBody(BinaryData requestBody) {
    if (locked) {
        // Name the rejected operation so the failure is actionable from the message alone.
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot set body."));
    }
    Objects.requireNonNull(requestBody, "'requestBody' cannot be null.");
    this.requestCallback = this.requestCallback.andThen(request -> request.setBody(requestBody));
    return this;
}
/**
 * Sets the additional context on the request that is passed during the service call.
 *
 * @param context Additional context that is passed during the service call.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException If this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions setContext(Context context) {
    if (locked) {
        // Name the rejected operation so the failure is actionable from the message alone.
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot set context."));
    }
    this.context = context;
    return this;
}
/**
 * Sets the configuration indicating how the body of the resulting HTTP response should be handled. If {@code null},
 * the response body will be handled based on the content type of the response.
 *
 * <p>For more information about the options for handling an HTTP response body, see {@link ResponseBodyMode}.</p>
 *
 * @param responseBodyMode The configuration indicating how the body of the resulting HTTP response should be
 * handled.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException If this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions setResponseBodyMode(ResponseBodyMode responseBodyMode) {
    if (locked) {
        // Name the rejected operation so the failure is actionable from the message alone.
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot set response body mode."));
    }
    this.responseBodyMode = responseBodyMode;
    return this;
}
/**
 * Locks this {@link RequestOptions} to prevent further modifications.
 * Only used internally to build the shared {@code NONE} sentinel.
 *
 * @return This {@link RequestOptions} instance.
 */
private RequestOptions lock() {
    locked = true;
    return this;
}
} | class RequestOptions {
/**
 * Signifies that no options need to be passed to the pipeline.
 */
public static final RequestOptions NONE = new RequestOptions().lock();
private static final ClientLogger LOGGER = new ClientLogger(RequestOptions.class);
// Accumulates all request mutations; applied in registration order when the request is built.
private Consumer<HttpRequest> requestCallback = request -> {
};
private Context context;
private ResponseBodyMode responseBodyMode;
// True only for NONE; guards every mutator.
private boolean locked;
private ClientLogger logger;
/**
 * Creates a new instance of {@link RequestOptions}.
 */
public RequestOptions() {
    this.context = Context.EMPTY;
}
/**
 * Gets the request callback, applying all the configurations set on this instance of {@link RequestOptions}.
 *
 * @return The request callback.
 */
public Consumer<HttpRequest> getRequestCallback() {
    return this.requestCallback;
}
/**
 * Gets the additional context on the request that is passed during the service call.
 *
 * @return The additional context that is passed during the service call.
 */
public Context getContext() {
    return context;
}
/**
 * Gets the configuration indicating how the body of the resulting HTTP response should be handled.
 *
 * <p>For more information about the options for handling an HTTP response body, see {@link ResponseBodyMode}.</p>
 *
 * @return The configuration indicating how the body of the resulting HTTP response should be handled.
 */
public ResponseBodyMode getResponseBodyMode() {
    return responseBodyMode;
}
/**
 * Gets the {@link ClientLogger} used to log the request and response.
 *
 * @return The {@link ClientLogger} used to log the request and response.
 */
public ClientLogger getLogger() {
    return logger;
}
/**
 * Adds a header to the {@link HttpRequest}.
 *
 * <p>If a header with the given name exists, the {@code value} is added to the existing header (comma-separated),
 * otherwise a new header will be created.</p>
 *
 * @param header The header key.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException if this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions addHeader(HttpHeader header) {
    if (locked) {
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot add header."));
    }
    this.requestCallback = this.requestCallback.andThen(request -> request.getHeaders().add(header));
    return this;
}
/**
 * Sets a header on the {@link HttpRequest}.
 *
 * <p>If a header with the given name exists it is overridden by the new {@code value}.</p>
 *
 * @param header The header key.
 * @param value The header value.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException if this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions setHeader(HttpHeaderName header, String value) {
    if (locked) {
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot set header."));
    }
    this.requestCallback = this.requestCallback.andThen(request -> request.getHeaders().set(header, value));
    return this;
}
/**
 * Adds a query parameter to the request URL. The parameter name and value will be URL encoded. To use an already
 * encoded parameter name and value, call {@code addQueryParam("name", "value", true)}.
 *
 * @param parameterName The name of the query parameter.
 * @param value The value of the query parameter.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException if this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions addQueryParam(String parameterName, String value) {
    // The javadoc above documented this overload but the method itself was missing;
    // delegate to the encoding-aware overload (which performs the locked check).
    return addQueryParam(parameterName, value, false);
}
/**
 * Adds a query parameter to the request URL, specifying whether the parameter is already encoded. A value of
 * {@code true} for {@code encoded} indicates the name and value are already encoded and the engine should not
 * encode them again. By default, the value will be encoded.
 *
 * @param parameterName The name of the query parameter.
 * @param value The value of the query parameter.
 * @param encoded Whether this query parameter is already encoded.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException if this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions addQueryParam(String parameterName, String value, boolean encoded) {
    if (locked) {
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot add query param."));
    }
    this.requestCallback = this.requestCallback.andThen(request -> {
        String url = request.getUrl().toString();
        String encodedParameterName = encoded ? parameterName : UrlEscapers.QUERY_ESCAPER.escape(parameterName);
        String encodedParameterValue = encoded ? value : UrlEscapers.QUERY_ESCAPER.escape(value);
        request.setUrl(url + (url.contains("?") ? "&" : "?") + encodedParameterName + "=" + encodedParameterValue);
    });
    return this;
}
/**
 * Adds a custom request callback to modify the {@link HttpRequest} before it's sent by the {@link HttpClient}. The
 * modifications made on a {@link RequestOptions} object are applied in order on the request.
 *
 * @param requestCallback The request callback.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws NullPointerException If {@code requestCallback} is {@code null}.
 * @throws IllegalStateException if this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions addRequestCallback(Consumer<HttpRequest> requestCallback) {
    if (locked) {
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot add request callback."));
    }
    Objects.requireNonNull(requestCallback, "'requestCallback' cannot be null.");
    this.requestCallback = this.requestCallback.andThen(requestCallback);
    return this;
}
/**
 * Sets the body to send as part of the {@link HttpRequest}.
 *
 * @param requestBody the request body data
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws NullPointerException If {@code requestBody} is {@code null}.
 * @throws IllegalStateException if this instance is the immutable {@link RequestOptions#NONE}.
 */
public RequestOptions setBody(BinaryData requestBody) {
    if (locked) {
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot set body."));
    }
    Objects.requireNonNull(requestBody, "'requestBody' cannot be null.");
    this.requestCallback = this.requestCallback.andThen(request -> request.setBody(requestBody));
    return this;
}
/**
 * Sets the additional context on the request that is passed during the service call.
 *
 * @param context Additional context that is passed during the service call.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException if this instance is {@link RequestOptions#NONE}.
 */
public RequestOptions setContext(Context context) {
    // Fast path: mutate and return when this instance is still mutable.
    if (!locked) {
        this.context = context;
        return this;
    }
    // The shared immutable instance cannot be modified.
    IllegalStateException immutableError =
        new IllegalStateException("RequestOptions.NONE is immutable. Cannot set context.");
    throw LOGGER.logThrowableAsError(immutableError);
}
/**
 * Sets the configuration indicating how the body of the resulting HTTP response should be handled. If {@code null},
 * the response body will be handled based on the content type of the response.
 *
 * <p>For more information about the options for handling an HTTP response body, see {@link ResponseBodyMode}.</p>
 *
 * @param responseBodyMode The configuration indicating how the body of the resulting HTTP response should be
 * handled.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException if this instance is {@link RequestOptions#NONE}.
 */
public RequestOptions setResponseBodyMode(ResponseBodyMode responseBodyMode) {
    // RequestOptions.NONE is a shared immutable instance; reject any mutation attempt on it.
    if (locked) {
        throw LOGGER.logThrowableAsError(
            new IllegalStateException("RequestOptions.NONE is immutable. Cannot set response body mode."));
    }
    this.responseBodyMode = responseBodyMode;
    return this;
}
/**
 * Sets the {@link ClientLogger} used to log the request and response.
 *
 * @param logger The {@link ClientLogger} used to log the request and response.
 *
 * @return The updated {@link RequestOptions} object.
 *
 * @throws IllegalStateException if this instance is {@link RequestOptions#NONE}.
 */
public RequestOptions setLogger(ClientLogger logger) {
    // Fast path: mutate and return when this instance is still mutable.
    if (!locked) {
        this.logger = logger;
        return this;
    }
    // The shared immutable instance cannot be modified.
    IllegalStateException immutableError =
        new IllegalStateException("RequestOptions.NONE is immutable. Cannot set logger.");
    throw LOGGER.logThrowableAsError(immutableError);
}
/**
 * Locks this {@link RequestOptions} to prevent further modifications.
 *
 * <p>Once locked, every mutator on this class throws {@link IllegalStateException}. Private because it is only
 * intended for constructing the shared immutable instance.</p>
 *
 * @return This {@link RequestOptions} instance.
 */
private RequestOptions lock() {
    locked = true;
    return this;
}
} |
See my other [comment](https://github.com/Azure/azure-sdk-for-java/pull/39537#discussion_r1552510840) about this. | public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertNotNull(retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
} | assertNotNull(createdKey.getProperties().getHsmPlatform()); | public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
 * Tests that a key can be created in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    createKeyRunner((keyToCreate) ->
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(createdKey -> {
                assertKeyEquals(keyToCreate, createdKey);
                // The service is expected to report an HSM platform value on created keys.
                assertNotNull(createdKey.getProperties().getHsmPlatform());
            })
            .verifyComplete());
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that a RSA key created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
final KeyType keyType;
if (runManagedHsmTest) {
keyType = KeyType.RSA_HSM;
} else {
keyType = KeyType.RSA;
}
StepVerifier.create(keyAsyncClient.createKey("", keyType))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that a key is not able to be updated when it is disabled. 403 error is expected.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
 * Tests that an existing key can be retrieved.
 */
// FIX: the annotations below previously dangled with no method attached and were immediately
// followed by the same annotations on getKeySpecificVersion — a duplicate-annotation compile
// error (@ParameterizedTest/@MethodSource are not repeatable). The missing getKey test method
// is restored here.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getKeyRunner((keyToSetAndGet) -> {
        // Create the key, then fetch it back and verify both copies agree.
        StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
            .assertNext(createdKey -> {
                assertKeyEquals(keyToSetAndGet, createdKey);
                assertNotNull(createdKey.getProperties().getHsmPlatform());
            })
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
            .assertNext(retrievedKey -> {
                assertKeyEquals(keyToSetAndGet, retrievedKey);
                assertNotNull(retrievedKey.getProperties().getHsmPlatform());
            })
            .verifyComplete();
    });
}
/**
 * Tests that a specific version of the key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
        // Each createKey call produces a new version; fetch each by its explicit version id.
        StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
            keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
            keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithNewValue, response))
            .verifyComplete();
    });
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
})
.verifyComplete();
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
.assertNext(keyResponse -> {
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
})
.verifyComplete();
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
.verifyComplete();
byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse ->
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
.verifyComplete();
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepIfRunningAgainstService(60000);
StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepIfRunningAgainstService(10000);
for (CreateKeyOptions key : keysToList.values()) {
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(key.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
}
sleepIfRunningAgainstService(90000);
StepVerifier.create(keyAsyncClient.listDeletedKeys()
.doOnNext(actualKey -> {
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
}).last())
.expectNextCount(1)
.verifyComplete();
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(30000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
.assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
.verifyComplete();
});
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(10000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
return actualKey;
}).last())
.expectNextCount(1)
.verifyComplete();
assertEquals(0, keysToList.size());
});
}
/**
* Tests that an existing key can be released.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyAsyncClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
.assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
.verifyComplete();
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
.assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
.expectComplete()
.verify();
});
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that fetching the key rotation policy of a key with no policy set returns the service's default policy
 * (a single NOTIFY action 30 days before expiry, with no id or timestamps).
 */
// NOTE(review): the original Javadoc ("non-existent key throws") was a copy-paste from the
// previous test and did not match this method's behavior.
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation policies are not supported on Managed HSM in this test run.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
        .assertNext(keyRotationPolicy -> {
            assertNotNull(keyRotationPolicy);
            // Default policy has no identity or lifecycle metadata...
            assertNull(keyRotationPolicy.getId());
            assertNull(keyRotationPolicy.getCreatedOn());
            assertNull(keyRotationPolicy.getUpdatedOn());
            assertNull(keyRotationPolicy.getExpiresIn());
            // ...and exactly one NOTIFY action 30 days before expiry.
            assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
            assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
            assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
            assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
        }).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Tests that a key rotation policy can be updated with all possible properties, then retrieved and compared
 * against the updated value.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation policies are not supported on Managed HSM in this test run.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
        // Update the policy, then fetch it back and verify both copies agree.
        StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
            .flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
                keyAsyncClient.getKeyRotationPolicy(keyName))))
            .assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
            .verifyComplete();
    });
}
/**
* Tests that a key can be rotated.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
.flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
keyAsyncClient.rotateKey(keyName))))
.assertNext(tuple -> {
KeyVaultKey createdKey = tuple.getT1();
KeyVaultKey rotatedKey = tuple.getT2();
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}).verifyComplete();
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
* that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete();
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
assertNotNull(cryptographyAsyncClient);
byte[] plaintext = "myPlaintext".getBytes();
StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
.map(EncryptResult::getCipherText)
.flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
.map(DecryptResult::getPlainText)))
.assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
.verifyComplete();
});
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key and version using a
* {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
assertNotNull(cryptographyAsyncClient);
}
/**
 * Polls until the deleted key with the given name is fully purged, giving up after ten attempts
 * (about 20 seconds of recorded sleep) and logging a warning to stderr if it never disappears.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey stillDeleted = null;
        try {
            stillDeleted = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // Not-found means the purge completed; stillDeleted remains null.
        }
        if (stillDeleted == null) {
            return;
        }
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
// NOTE(review): "0" appears to mean "not HSM-backed" -- confirm expected
// value for Managed HSM runs.
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that a RSA key created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Managed HSM vaults only accept HSM-backed key types; pick the matching RSA flavor.
    final KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;
    // The service must reject an empty key name with HTTP 400.
    StepVerifier.create(keyAsyncClient.createKey("", keyType))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Create the key, then patch its expiry and verify the updated value round-trips.
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that a key is not able to be updated when it is disabled. 403 error is expected.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
.verifyComplete();
StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithNewValue, response))
.verifyComplete();
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Fetching a key that was never created must surface HTTP 404.
StepVerifier.create(keyAsyncClient.getKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an existing key can be deleted.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
// Delete is a long-running operation; the helper shortens the poll interval in playback.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
.assertNext(deletedKeyResponse -> {
// A soft-deleted key carries deletion metadata and a recovery handle.
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
})
.verifyComplete();
});
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that an attempt to retrieve a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a deleted key can be recovered on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
.verifyComplete();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
.assertNext(keyResponse -> {
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
})
.verifyComplete();
});
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
.assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();
StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
.assertNext(response -> {
assertNotNull(response);
assertTrue(response.length > 0);
}).verifyComplete();
});
}
/**
* Tests that an attempt to backup a non existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
.verifyComplete();
// Snapshot the key before removing it so it can be restored below.
byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
// Purge the soft-deleted key; restore is only possible once the name is free again.
StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse ->
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
.verifyComplete();
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Extra settle time for the service after the purge; no-op in playback mode.
sleepIfRunningAgainstService(60000);
StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Use an explicit charset: the no-arg getBytes() depends on the platform default
    // encoding, which can vary between machines running this test.
    byte[] keyBackupBytes = "non-existing".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    // Garbage backup bytes must be rejected with HTTP 400.
    StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
.verifyComplete();
// Soft-delete the key, then fetch it through the deleted-key API.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
// Deletion metadata must be populated for a soft-deleted key.
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepIfRunningAgainstService(10000);
for (CreateKeyOptions key : keysToList.values()) {
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(key.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
}
sleepIfRunningAgainstService(90000);
StepVerifier.create(keyAsyncClient.listDeletedKeys()
.doOnNext(actualKey -> {
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
}).last())
.expectNextCount(1)
.verifyComplete();
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(30000);
StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
.assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
.verifyComplete();
});
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
// Give the service time to index the new keys; no-op in playback mode.
sleepIfRunningAgainstService(10000);
// Remove each expected key from the map as it is observed in the listing;
// an empty map afterwards means every created key was returned.
StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
return actualKey;
}).last())
.expectNextCount(1)
.verifyComplete();
assertEquals(0, keysToList.size());
});
}
/**
* Tests that an existing key can be released.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Key release requires a Managed HSM plus an attestation service; skip otherwise.
    Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
    createKeyAsyncClient(httpClient, serviceVersion);
    releaseKeyRunner((keyToRelease, attestationUrl) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
            .assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
            .verifyComplete();
        // Playback runs use a canned token; live/record runs fetch a real one.
        String targetAttestationToken = "testAttestationToken";
        if (getTestMode() != TestMode.PLAYBACK) {
            if (!attestationUrl.endsWith("/")) {
                attestationUrl = attestationUrl + "/";
            }
            targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
        }
        StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
            .assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
            // verifyComplete() == expectComplete().verify(); matches the style used
            // by every other test in this class.
            .verifyComplete();
    });
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation policies are not exercised against Managed HSM here.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
.assertNext(keyRotationPolicy -> {
// With no policy explicitly set, the service returns a default policy object:
// no identity/timestamps, and a single NOTIFY action 30 days before expiry.
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that an key rotation policy can be updated with all possible properties, then retrieves it.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
* Tests that a key can be rotated.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
// Create a key, rotate it, and compare the pre/post-rotation views side by side.
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
.flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
keyAsyncClient.rotateKey(keyName))))
.assertNext(tuple -> {
KeyVaultKey createdKey = tuple.getT1();
KeyVaultKey rotatedKey = tuple.getT2();
// Rotation produces a new version but preserves the key's name and tags.
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}).verifyComplete();
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Client construction is purely local; no service call is made for this check.
    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey"));
}
/**
* Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}. Also tests
* that cryptographic operations can be performed with said cryptography client.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete();
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
assertNotNull(cryptographyAsyncClient);
byte[] plaintext = "myPlaintext".getBytes();
StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
.map(EncryptResult::getCipherText)
.flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
.map(DecryptResult::getPlainText)))
.assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
.verifyComplete();
});
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key and version using a
* {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // An explicit version identifier must also produce a usable client.
    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204"));
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
assertNotNull(cryptographyAsyncClient);
}
/**
* Tests that a {@link CryptographyAsyncClient} can be created for a given key using a {@link KeyAsyncClient}.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
assertNotNull(cryptographyAsyncClient);
}
private void pollOnKeyPurge(String keyName) {
int pendingPollCount = 0;
while (pendingPollCount < 10) {
DeletedKey deletedKey = null;
try {
deletedKey = keyAsyncClient.getDeletedKey(keyName).block();
} catch (ResourceNotFoundException ignored) {
}
if (deletedKey != null) {
sleepIfRunningAgainstService(2000);
pendingPollCount += 1;
} else {
return;
}
}
System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
See my other [comment](https://github.com/Azure/azure-sdk-for-java/pull/39537#discussion_r1552510840) about this. | public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
    StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
        .assertNext(createdKey -> {
            assertKeyEquals(keyToSetAndGet, createdKey);
            // Assert the exact value instead of just non-null, matching the
            // stricter check already used by createKey(): non-HSM vaults
            // report HSM platform "0".
            assertEquals("0", createdKey.getProperties().getHsmPlatform());
        })
        .verifyComplete();
    StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
        .assertNext(retrievedKey -> {
            assertKeyEquals(keyToSetAndGet, retrievedKey);
            assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
        })
        .verifyComplete();
});
}
createKeyAsyncClient(httpClient, serviceVersion);
getKeyRunner((keyToSetAndGet) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
.assertNext(createdKey -> {
assertKeyEquals(keyToSetAndGet, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
.assertNext(retrievedKey -> {
assertKeyEquals(keyToSetAndGet, retrievedKey);
assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
})
.verifyComplete();
});
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that a RSA key created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
final KeyType keyType;
if (runManagedHsmTest) {
keyType = KeyType.RSA_HSM;
} else {
keyType = KeyType.RSA;
}
StepVerifier.create(keyAsyncClient.createKey("", keyType))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
 * Verifies that passing a null key options object to create rejects with {@link NullPointerException}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    Mono<KeyVaultKey> createOperation = keyAsyncClient.createKey(null);
    StepVerifier.create(createOperation)
        .verifyError(NullPointerException.class);
}
/**
 * Tests that a key is able to be updated when it exists.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Create the original key, push an updated expiry via updateKeyProperties, and verify the
// returned key matches the expected updated state.
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
// Sanity-check the create result before issuing the update.
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
 * Tests updating the properties (expiry) of a disabled key.
 *
 * <p>NOTE(review): this javadoc previously stated "403 error is expected", but the test body
 * verifies a successful update ({@code assertNext} + {@code verifyComplete}) — confirm the
 * intended service behavior and align the description accordingly.</p>
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
// Confirm the create succeeded before updating the expiry.
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
 * Tests that an existing key can be retrieved.
 *
 * <p>NOTE(review): the source contained a dangling {@code @ParameterizedTest}/{@code @MethodSource}
 * pair with no method body (which both drops the retrieval test and leaves duplicate annotations
 * on the following method — a compile error). The {@code getKey} test has been restored here
 * following the pattern of the sibling tests; confirm the runner name against the test base.</p>
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getKeyRunner((keyToSetAndGet) -> {
        // Create the key, then fetch it back by name and verify the round trip.
        StepVerifier.create(keyAsyncClient.createKey(keyToSetAndGet))
            .assertNext(keyResponse -> assertKeyEquals(keyToSetAndGet, keyResponse))
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.getKey(keyToSetAndGet.getName()))
            .assertNext(response -> assertKeyEquals(keyToSetAndGet, response))
            .verifyComplete();
    });
}
/**
 * Tests that a specific version of the key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
        // Create two versions under the same name and fetch each one explicitly by version.
        StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
            keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
            .verifyComplete();
        StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
            keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
            .assertNext(response -> assertKeyEquals(keyWithNewValue, response))
            .verifyComplete();
    });
}
/**
 * Verifies that requesting a key that does not exist fails with a 404 (not found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    Mono<KeyVaultKey> getOperation = keyAsyncClient.getKey("non-existing");
    StepVerifier.create(getOperation)
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that an existing key can be deleted.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
deleteKeyRunner((keyToDelete) -> {
// Create the key that will be deleted.
StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
.assertNext(keyResponse -> assertKeyEquals(keyToDelete, keyResponse)).verifyComplete();
// Delete is a long-running operation; poll it to completion (poll interval shortened in playback).
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDelete.getName()));
// The final poll response carries the DeletedKey with its soft-delete recovery metadata.
StepVerifier.create(poller.last().map(AsyncPollResponse::getValue))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDelete.getName(), deletedKeyResponse.getName());
})
.verifyComplete();
});
}
/**
 * Tests that attempting to delete a non-existing key fails with a 404 (not found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Verifies that fetching a non-existing deleted key on a soft-delete enabled vault fails with a
 * 404 (not found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    Mono<DeletedKey> getDeletedOperation = keyAsyncClient.getDeletedKey("non-existing");
    StepVerifier.create(getDeletedOperation)
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
// Create the key that will be deleted and subsequently recovered.
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
.verifyComplete();
// Drive the delete long-running operation to completion first.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
// Then recover it and verify the restored key keeps its name and date attributes.
PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
.assertNext(keyResponse -> {
assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
})
.verifyComplete();
});
}
/**
 * Verifies that recovering a non-existing deleted key on a soft-delete enabled vault fails with a
 * 404 (not found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Verifies that a key can be backed up from the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    backupKeyRunner((keyToBackup) -> {
        // Create the key that will be backed up.
        StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
            .assertNext(createdKey -> assertKeyEquals(keyToBackup, createdKey))
            .verifyComplete();
        // A successful backup yields a non-empty opaque byte blob.
        StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
            .assertNext(backupBytes -> {
                assertNotNull(backupBytes);
                assertTrue(backupBytes.length > 0);
            })
            .verifyComplete();
    });
}
/**
 * Verifies that backing up a non-existing key fails with a 404 (not found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be restored from a backup after the original has been deleted and purged.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
.assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
.verifyComplete();
// Capture the backup blob synchronously before deleting the key.
byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();
// Delete and fully purge the key so the restore below recreates it from scratch.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
.assertNext(voidResponse ->
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
.verifyComplete();
// Wait until the purge actually completes server-side before attempting the restore.
pollOnKeyPurge(keyToBackupAndRestore.getName());
// Extra settle time when running against the live service; no-op in playback.
sleepIfRunningAgainstService(60000);
StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
.assertNext(response -> {
assertEquals(keyToBackupAndRestore.getName(), response.getName());
assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
}).verifyComplete();
});
}
/**
 * Verifies that restoring a key from malformed backup bytes fails with a 400 (bad request).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    // Arbitrary bytes that are not a valid backup blob.
    byte[] malformedBackup = "non-existing".getBytes();
    StepVerifier.create(keyAsyncClient.restoreKeyBackup(malformedBackup))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
// Create the key that will be deleted and then looked up in the deleted state.
StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
.assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
.verifyComplete();
// Drive the delete long-running operation to completion.
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
// The deleted key should be retrievable with its soft-delete metadata populated.
StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
.assertNext(deletedKeyResponse -> {
assertNotNull(deletedKeyResponse.getDeletedOn());
assertNotNull(deletedKeyResponse.getRecoveryId());
assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
}).verifyComplete();
});
}
/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
// Skipped in live mode — presumably because enumerating deleted keys against the live service is
// slow/nondeterministic; TODO confirm the original motivation.
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
// Create every key that will subsequently be deleted.
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
}
sleepIfRunningAgainstService(10000);
// Delete each key, polling every delete operation to completion.
for (CreateKeyOptions key : keysToList.values()) {
PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
keyAsyncClient.beginDeleteKey(key.getName()));
StepVerifier.create(poller.last())
.expectNextCount(1)
.verifyComplete();
}
// Give the service time to reflect the deletions in the listing; no-op in playback.
sleepIfRunningAgainstService(90000);
// Every listed deleted key must carry soft-delete metadata; the listing must be non-empty.
StepVerifier.create(keyAsyncClient.listDeletedKeys()
.doOnNext(actualKey -> {
assertNotNull(actualKey.getDeletedOn());
assertNotNull(actualKey.getRecoveryId());
}).last())
.expectNextCount(1)
.verifyComplete();
});
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
// Assumes all CreateKeyOptions in keysToList share a single key name, so each create adds a
// new version of the same key — TODO confirm against the runner implementation.
String keyName = null;
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
// Allow the service to surface all versions; no-op in playback.
sleepIfRunningAgainstService(30000);
// One version should be listed per create call issued above.
StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
.assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
.verifyComplete();
});
}
/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
// Create every expected key up front.
for (CreateKeyOptions key : keysToList.values()) {
StepVerifier.create(keyAsyncClient.createKey(key))
.assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
.verifyComplete();
}
sleepIfRunningAgainstService(10000);
// NOTE: the map operator below deliberately side-effects — it removes each matched key from
// keysToList so the final assertEquals(0, ...) proves every expected key appeared in the listing.
StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
return actualKey;
}).last())
.expectNextCount(1)
.verifyComplete();
// All expected keys must have been seen (and removed) during the listing.
assertEquals(0, keysToList.size());
});
}
/**
 * Tests that an existing key can be released.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Key release is only exercised against Managed HSM and only when explicitly enabled.
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyAsyncClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
.assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
.verifyComplete();
// In playback a fixed placeholder token is used; otherwise a real token is fetched from the
// attestation service's test-token endpoint.
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
// Releasing the key with a valid attestation token must yield a non-null result value.
StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
.assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
.expectComplete()
.verify();
});
}
/**
 * Verifies that fetching the key rotation policy of a non-existent key fails with a 404
 * (not found).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation policies are not exercised against Managed HSM here.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    String nonExistentKeyName = testResourceNamer.randomName("nonExistentKey", 20);
    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(nonExistentKeyName))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that fetching the rotation policy of a key that has no policy explicitly set returns the
 * service's default policy (no id/timestamps, a single NOTIFY action at 30 days before expiry).
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Rotation policies are not exercised against Managed HSM here.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
.assertNext(keyRotationPolicy -> {
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}).verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Verifies that a key rotation policy can be updated with all possible properties and that the
 * subsequently retrieved policy matches what was set.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation policies are not exercised against Managed HSM here.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyAsyncClient(httpClient, serviceVersion);
    updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
        // A rotation policy can only be set on an existing key, so create one first.
        StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
        // Pair the updated policy with the freshly-fetched one and compare them.
        StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
            .flatMap(policyFromUpdate -> Mono.zip(Mono.just(policyFromUpdate),
                keyAsyncClient.getKeyRotationPolicy(keyName))))
            .assertNext(pair -> assertKeyVaultRotationPolicyEquals(pair.getT1(), pair.getT2()))
            .verifyComplete();
    });
}
/**
 * Tests that a key can be rotated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
// Key rotation is not exercised against Managed HSM here.
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
// Create the key and immediately rotate it; zip pairs the original with the rotated version.
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
.flatMap(createdKey -> Mono.zip(Mono.just(createdKey),
keyAsyncClient.rotateKey(keyName))))
.assertNext(tuple -> {
KeyVaultKey createdKey = tuple.getT1();
KeyVaultKey rotatedKey = tuple.getT2();
// Rotation produces a new version but must preserve the key's name and tags.
assertEquals(createdKey.getName(), rotatedKey.getName());
assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}).verifyComplete();
}
/**
 * Verifies that a {@link CryptographyAsyncClient} can be created for a given key using a
 * {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    CryptographyAsyncClient cryptoClient = keyAsyncClient.getCryptographyAsyncClient("myKey");
    assertNotNull(cryptoClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key using a
 * {@link KeyAsyncClient}, and that cryptographic operations (encrypt/decrypt round trip) can be
 * performed with said cryptography client.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
// Create the key that the cryptography client will operate on.
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete();
CryptographyAsyncClient cryptographyAsyncClient =
keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());
assertNotNull(cryptographyAsyncClient);
// Encrypt then decrypt with RSA-OAEP; the round trip must reproduce the original plaintext.
byte[] plaintext = "myPlaintext".getBytes();
StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
.map(EncryptResult::getCipherText)
.flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
.map(DecryptResult::getPlainText)))
.assertNext(decryptedText -> assertArrayEquals(plaintext, decryptedText))
.verifyComplete();
});
}
/**
 * Verifies that a {@link CryptographyAsyncClient} can be created for a given key and version using
 * a {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);
    CryptographyAsyncClient cryptoClient =
        keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
    assertNotNull(cryptoClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key with an empty key
 * version using a {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", "");
assertNotNull(cryptographyAsyncClient);
}
/**
 * Tests that a {@link CryptographyAsyncClient} can be created for a given key with a null key
 * version using a {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
CryptographyAsyncClient cryptographyAsyncClient = keyAsyncClient.getCryptographyAsyncClient("myKey", null);
assertNotNull(cryptographyAsyncClient);
}
/**
 * Waits for a deleted key's purge to complete, polling at most 10 times with a 2-second pause
 * between attempts (the pause is a no-op in playback). Returns as soon as the deleted key is no
 * longer retrievable; logs to stderr if the purge was never observed.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;
        try {
            deletedKey = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // Not found means the purge has completed; deletedKey stays null.
        }
        if (deletedKey == null) {
            return;
        }
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} | class KeyAsyncClientTest extends KeyClientTestBase {
protected KeyAsyncClient keyAsyncClient;
@Override
protected void beforeTest() {
beforeTestSetup();
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, null);
}
protected void createKeyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
keyAsyncClient = getKeyClientBuilder(buildAsyncAssertingClient(
interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
getEndpoint(), serviceVersion)
.buildAsyncClient();
}
/**
* Tests that a key can be created in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(createdKey -> {
assertKeyEquals(keyToCreate, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
})
.verifyComplete());
}
/**
* Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
* provided in the authentication challenge.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
KeyVaultCredentialPolicy.clearCache();
createKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that a RSA key created.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createRsaKeyRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createRsaKey(keyToCreate))
.assertNext(response -> assertKeyEquals(keyToCreate, response))
.verifyComplete());
}
/**
* Tests that we cannot create a key when the key is an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
final KeyType keyType;
if (runManagedHsmTest) {
keyType = KeyType.RSA_HSM;
} else {
keyType = KeyType.RSA;
}
StepVerifier.create(keyAsyncClient.createKey("", keyType))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Tests that we can create keys when value is not null or an empty string.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
.verifyErrorSatisfies(e ->
assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST)));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
StepVerifier.create(keyAsyncClient.createKey(null))
.verifyError(NullPointerException.class);
}
/**
* Tests that a key is able to be updated when it exists.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that a key is not able to be updated when it is disabled. 403 error is expected.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
updateDisabledKeyRunner((originalKey, updatedKey) -> StepVerifier.create(keyAsyncClient.createKey(originalKey)
.flatMap(response -> {
assertKeyEquals(originalKey, response);
return keyAsyncClient.updateKeyProperties(response.getProperties()
.setExpiresOn(updatedKey.getExpiresOn()));
}))
.assertNext(response -> assertKeyEquals(updatedKey, response))
.verifyComplete());
}
/**
* Tests that an existing key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyAsyncClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
StepVerifier.create(keyAsyncClient.createKey(keyWithOriginalValue).flatMap(keyVersionOne ->
keyAsyncClient.getKey(keyWithOriginalValue.getName(), keyVersionOne.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithOriginalValue, response))
.verifyComplete();
StepVerifier.create(keyAsyncClient.createKey(keyWithNewValue).flatMap(keyVersionTwo ->
keyAsyncClient.getKey(keyWithNewValue.getName(), keyVersionTwo.getProperties().getVersion())))
.assertNext(response -> assertKeyEquals(keyWithNewValue, response))
.verifyComplete();
});
}
/**
 * Verifies that fetching a key that does not exist fails with HTTP 404.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.getKey("non-existing"))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}

/**
 * Verifies that an existing key can be soft-deleted and that the deleted-key metadata
 * (deletion time, recovery id, scheduled purge date) comes back populated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    deleteKeyRunner((keyToDelete) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDelete))
            .assertNext(createdKey -> assertKeyEquals(keyToDelete, createdKey))
            .verifyComplete();

        PollerFlux<DeletedKey, Void> deletePoller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDelete.getName()));

        StepVerifier.create(deletePoller.last().map(AsyncPollResponse::getValue))
            .assertNext(deletedKey -> {
                assertNotNull(deletedKey.getDeletedOn());
                assertNotNull(deletedKey.getRecoveryId());
                assertNotNull(deletedKey.getScheduledPurgeDate());
                assertEquals(keyToDelete.getName(), deletedKey.getName());
            })
            .verifyComplete();
    });
}

/**
 * Verifies that starting a delete operation for a key that does not exist fails with HTTP 404.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.beginDeleteKey("non-existing"))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}

/**
 * Verifies that fetching a deleted key that does not exist fails with HTTP 404 on a
 * soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.getDeletedKey("non-existing"))
        .verifyErrorSatisfies(error ->
            assertRestException(error, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
        // Create the key that will subsequently be deleted and recovered.
        StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndRecover))
            .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndRecover, keyResponse))
            .verifyComplete();

        // Soft-delete the key and wait for the delete long-running operation to finish.
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDeleteAndRecover.getName()));

        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();

        // Recover the key and verify its identity and validity window survived the round trip.
        PollerFlux<KeyVaultKey, Void> recoverPoller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));

        StepVerifier.create(recoverPoller.last().map(AsyncPollResponse::getValue))
            .assertNext(keyResponse -> {
                assertEquals(keyToDeleteAndRecover.getName(), keyResponse.getName());
                assertEquals(keyToDeleteAndRecover.getNotBefore(), keyResponse.getProperties().getNotBefore());
                assertEquals(keyToDeleteAndRecover.getExpiresOn(), keyResponse.getProperties().getExpiresOn());
            })
            .verifyComplete();
    });
}

/**
 * Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.beginRecoverDeletedKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Tests that a key can be backed up in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    backupKeyRunner((keyToBackup) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToBackup))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackup, keyResponse)).verifyComplete();

        // The backup blob is opaque; we only check that some bytes came back.
        StepVerifier.create(keyAsyncClient.backupKey(keyToBackup.getName()))
            .assertNext(response -> {
                assertNotNull(response);
                assertTrue(response.length > 0);
            }).verifyComplete();
    });
}

/**
 * Tests that an attempt to backup a non existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.backupKey("non-existing"))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}

/**
 * Tests that a key can be restored from its backup blob after being deleted and purged.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    restoreKeyRunner((keyToBackupAndRestore) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToBackupAndRestore))
            .assertNext(keyResponse -> assertKeyEquals(keyToBackupAndRestore, keyResponse))
            .verifyComplete();

        // Take the backup before the key is deleted; blocking is acceptable in a test.
        byte[] backup = keyAsyncClient.backupKey(keyToBackupAndRestore.getName()).block();

        // The key must be fully deleted AND purged before a restore is possible.
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToBackupAndRestore.getName()));

        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();

        StepVerifier.create(keyAsyncClient.purgeDeletedKeyWithResponse(keyToBackupAndRestore.getName()))
            .assertNext(voidResponse ->
                assertEquals(HttpURLConnection.HTTP_NO_CONTENT, voidResponse.getStatusCode()))
            .verifyComplete();

        // Wait until the purge has actually taken effect server-side, then a little longer
        // (the extra sleep runs only against the live service, not in playback).
        pollOnKeyPurge(keyToBackupAndRestore.getName());

        sleepIfRunningAgainstService(60000);

        StepVerifier.create(keyAsyncClient.restoreKeyBackup(backup))
            .assertNext(response -> {
                assertEquals(keyToBackupAndRestore.getName(), response.getName());
                assertEquals(keyToBackupAndRestore.getNotBefore(), response.getProperties().getNotBefore());
                assertEquals(keyToBackupAndRestore.getExpiresOn(), response.getProperties().getExpiresOn());
            }).verifyComplete();
    });
}

/**
 * Tests that an attempt to restore a key from malformed backup bytes throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    // Arbitrary bytes that cannot be a valid backup blob.
    byte[] keyBackupBytes = "non-existing".getBytes();

    StepVerifier.create(keyAsyncClient.restoreKeyBackup(keyBackupBytes))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    getDeletedKeyRunner((keyToDeleteAndGet) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToDeleteAndGet))
            .assertNext(keyResponse -> assertKeyEquals(keyToDeleteAndGet, keyResponse))
            .verifyComplete();

        // Soft-delete the key and wait for the operation to complete.
        PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
            keyAsyncClient.beginDeleteKey(keyToDeleteAndGet.getName()));

        StepVerifier.create(poller.last())
            .expectNextCount(1)
            .verifyComplete();

        // The deleted key should now be retrievable with its recovery metadata populated.
        StepVerifier.create(keyAsyncClient.getDeletedKey(keyToDeleteAndGet.getName()))
            .assertNext(deletedKeyResponse -> {
                assertNotNull(deletedKeyResponse.getDeletedOn());
                assertNotNull(deletedKeyResponse.getRecoveryId());
                assertNotNull(deletedKeyResponse.getScheduledPurgeDate());
                assertEquals(keyToDeleteAndGet.getName(), deletedKeyResponse.getName());
            }).verifyComplete();
    });
}

/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    // Skipped against the live service; only exercised in playback/record runs.
    // NOTE(review): the reason is not stated here - presumably the long waits below
    // make it impractical live. Confirm before relying on this.
    if (interceptorManager.isLiveMode()) {
        return;
    }

    listDeletedKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse)).verifyComplete();
        }

        sleepIfRunningAgainstService(10000);

        // Delete every created key so they show up in the deleted-keys listing.
        for (CreateKeyOptions key : keysToList.values()) {
            PollerFlux<DeletedKey, Void> poller = setPlaybackPollerFluxPollInterval(
                keyAsyncClient.beginDeleteKey(key.getName()));

            StepVerifier.create(poller.last())
                .expectNextCount(1)
                .verifyComplete();
        }

        // Give the service time to reflect the deletions in the listing.
        sleepIfRunningAgainstService(90000);

        // Each listed entry must carry deletion metadata; `last()` just drains the stream.
        StepVerifier.create(keyAsyncClient.listDeletedKeys()
            .doOnNext(actualKey -> {
                assertNotNull(actualKey.getDeletedOn());
                assertNotNull(actualKey.getRecoveryId());
            }).last())
            .expectNextCount(1)
            .verifyComplete();
    });
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    listKeyVersionsRunner((keysToList) -> {
        String keyName = null;

        // All options share the same name, so each create adds a new version of one key.
        for (CreateKeyOptions key : keysToList) {
            keyName = key.getName();

            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
                .verifyComplete();
        }

        sleepIfRunningAgainstService(30000);

        StepVerifier.create(keyAsyncClient.listPropertiesOfKeyVersions(keyName).collectList())
            .assertNext(actualKeys -> assertEquals(keysToList.size(), actualKeys.size()))
            .verifyComplete();
    });
}

/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    listKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            StepVerifier.create(keyAsyncClient.createKey(key))
                .assertNext(keyResponse -> assertKeyEquals(key, keyResponse))
                .verifyComplete();
        }

        sleepIfRunningAgainstService(10000);

        // NOTE(review): this map() deliberately mutates keysToList as a checklist -
        // each expected key is removed when seen, so an empty map afterwards means
        // every created key appeared in the listing. Side effects in map() are
        // unusual; keep in mind when refactoring.
        StepVerifier.create(keyAsyncClient.listPropertiesOfKeys().map(actualKey -> {
            if (keysToList.containsKey(actualKey.getName())) {
                CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());

                assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
                assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());

                keysToList.remove(actualKey.getName());
            }

            return actualKey;
        }).last())
            .expectNextCount(1)
            .verifyComplete();

        assertEquals(0, keysToList.size());
    });
}
/**
 * Tests that an existing key can be released.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Key release is only exercised against Managed HSM and when explicitly enabled.
    Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);

    createKeyAsyncClient(httpClient, serviceVersion);

    releaseKeyRunner((keyToRelease, attestationUrl) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(keyToRelease))
            .assertNext(keyResponse -> assertKeyEquals(keyToRelease, keyResponse))
            .verifyComplete();

        // In playback a fixed token is used; otherwise fetch a real token from the
        // attestation service's test-token endpoint.
        String targetAttestationToken = "testAttestationToken";

        if (getTestMode() != TestMode.PLAYBACK) {
            if (!attestationUrl.endsWith("/")) {
                attestationUrl = attestationUrl + "/";
            }

            targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
        }

        StepVerifier.create(keyAsyncClient.releaseKey(keyToRelease.getName(), targetAttestationToken))
            .assertNext(releaseKeyResult -> assertNotNull(releaseKeyResult.getValue()))
            .expectComplete()
            .verify();
    });
}

/**
 * Tests that fetching the key rotation policy of a non-existent key throws.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation-policy tests only run against Key Vault, not Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);

    createKeyAsyncClient(httpClient, serviceVersion);

    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(testResourceNamer.randomName("nonExistentKey", 20)))
        .verifyErrorSatisfies(e ->
            assertRestException(e, ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND));
}
/**
 * Verifies that fetching the rotation policy of a key that never had one set succeeds and
 * returns a policy with no id/timestamps and a single NOTIFY action 30 days before expiry.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation policies are not exercised against Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);

    createKeyAsyncClient(httpClient, serviceVersion);

    String keyName = testResourceNamer.randomName("rotateKey", 20);

    StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();

    StepVerifier.create(keyAsyncClient.getKeyRotationPolicy(keyName))
        .assertNext(rotationPolicy -> {
            assertNotNull(rotationPolicy);

            // No explicit policy was set, so identifying fields are absent.
            assertNull(rotationPolicy.getId());
            assertNull(rotationPolicy.getCreatedOn());
            assertNull(rotationPolicy.getUpdatedOn());
            assertNull(rotationPolicy.getExpiresIn());

            // Exactly one lifetime action: NOTIFY, 30 days before expiry.
            assertEquals(1, rotationPolicy.getLifetimeActions().size());
            assertEquals(KeyRotationPolicyAction.NOTIFY,
                rotationPolicy.getLifetimeActions().get(0).getAction());
            assertEquals("P30D", rotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
            assertNull(rotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
        })
        .verifyComplete();
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyAsyncClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
.flatMap(updatedKeyRotationPolicy -> Mono.zip(Mono.just(updatedKeyRotationPolicy),
keyAsyncClient.getKeyRotationPolicy(keyName))))
.assertNext(tuple -> assertKeyVaultRotationPolicyEquals(tuple.getT1(), tuple.getT2()))
.verifyComplete();
});
}
/**
 * Verifies that a key rotation policy can be updated with all supported properties and that the
 * policy fetched afterwards matches the one returned by the update call.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation policies are not exercised against Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);

    createKeyAsyncClient(httpClient, serviceVersion);

    updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
        StepVerifier.create(keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName)))
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();

        // Pair the policy returned by the update with the one fetched afterwards.
        StepVerifier.create(
                keyAsyncClient.updateKeyRotationPolicy(keyName, keyRotationPolicy)
                    .flatMap(updatedPolicy ->
                        Mono.zip(Mono.just(updatedPolicy), keyAsyncClient.getKeyRotationPolicy(keyName))))
            .assertNext(policies -> assertKeyVaultRotationPolicyEquals(policies.getT1(), policies.getT2()))
            .verifyComplete();
    });
}

/**
 * Verifies that a key can be rotated and that the rotated version keeps the key's name and tags.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation is not exercised against Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);

    createKeyAsyncClient(httpClient, serviceVersion);

    String keyName = testResourceNamer.randomName("rotateKey", 20);

    StepVerifier.create(
            keyAsyncClient.createRsaKey(new CreateRsaKeyOptions(keyName))
                .flatMap(originalKey -> Mono.zip(Mono.just(originalKey), keyAsyncClient.rotateKey(keyName))))
        .assertNext(keys -> {
            KeyVaultKey originalKey = keys.getT1();
            KeyVaultKey rotatedKey = keys.getT2();

            assertEquals(originalKey.getName(), rotatedKey.getName());
            assertEquals(originalKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
        })
        .verifyComplete();
}
/**
 * Verifies that a {@link CryptographyAsyncClient} can be obtained for a key name via a
 * {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey"));
}

/**
 * Verifies that a {@link CryptographyAsyncClient} obtained from a {@link KeyAsyncClient} for a
 * freshly created key can round-trip data through encrypt and decrypt.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    createKeyRunner((keyToCreate) -> {
        StepVerifier.create(keyAsyncClient.createKey(keyToCreate))
            .assertNext(response -> assertKeyEquals(keyToCreate, response))
            .verifyComplete();

        CryptographyAsyncClient cryptographyAsyncClient =
            keyAsyncClient.getCryptographyAsyncClient(keyToCreate.getName());

        assertNotNull(cryptographyAsyncClient);

        // Encrypt then decrypt with RSA-OAEP; the decrypted bytes must match the input.
        byte[] plaintext = "myPlaintext".getBytes();

        StepVerifier.create(cryptographyAsyncClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext)
            .map(EncryptResult::getCipherText)
            .flatMap(ciphertext -> cryptographyAsyncClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext)
                .map(DecryptResult::getPlainText)))
            .assertNext(decryptedBytes -> assertArrayEquals(plaintext, decryptedBytes))
            .verifyComplete();
    });
}

/**
 * Verifies that a {@link CryptographyAsyncClient} can be obtained for a key name and an explicit
 * version via a {@link KeyAsyncClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey", "6A385B124DEF4096AF1361A85B16C204"));
}

/**
 * Verifies that a {@link CryptographyAsyncClient} can be obtained when an empty key version is given.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey", ""));
}

/**
 * Verifies that a {@link CryptographyAsyncClient} can be obtained when a null key version is given.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyAsyncClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyAsyncClient(httpClient, serviceVersion);

    assertNotNull(keyAsyncClient.getCryptographyAsyncClient("myKey", null));
}
/**
 * Polls until the given deleted key is no longer retrievable (i.e. the purge has taken effect),
 * giving up after ten attempts. A warning is printed if the key never disappears.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey;

        try {
            deletedKey = keyAsyncClient.getDeletedKey(keyName).block();
        } catch (ResourceNotFoundException ignored) {
            // A 404 means the purge has completed.
            deletedKey = null;
        }

        if (deletedKey == null) {
            return;
        }

        // Still present - wait (live runs only) and try again.
        sleepIfRunningAgainstService(2000);
    }

    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
See my other [comment](https://github.com/Azure/azure-sdk-for-java/pull/39537#discussion_r1552510840) about this. | public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
KeyVaultKey createdKey = keyClient.createKey(keyToCreate);
assertKeyEquals(keyToCreate, createdKey);
assertNotNull(createdKey.getProperties().getHsmPlatform());
});
} | assertNotNull(createdKey.getProperties().getHsmPlatform()); | public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyRunner((keyToCreate) -> {
KeyVaultKey createdKey = keyClient.createKey(keyToCreate);
assertKeyEquals(keyToCreate, createdKey);
assertEquals("0", createdKey.getProperties().getHsmPlatform());
});
} | class KeyClientTest extends KeyClientTestBase {
// Synchronous client under test; (re)built per test via createKeyClient(...).
protected KeyClient keyClient;

@Override
protected void beforeTest() {
    // Common per-test setup shared with the other Key Vault test classes.
    beforeTestSetup();
}

/**
 * Builds {@link #keyClient} for the given HTTP client and service version, with no
 * tenant-id override.
 */
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion, null);
}

/**
 * Builds {@link #keyClient} for the given HTTP client and service version.
 *
 * @param testTenantId Optional tenant ID to authenticate against; may be {@code null}.
 */
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
    // In playback mode the recorded HTTP client replaces the real one.
    keyClient = getKeyClientBuilder(buildSyncAssertingClient(
        interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
        getEndpoint(), serviceVersion)
        .buildClient();
}
/**
 * Tests that a key can be created in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
// NOTE(review): this method was missing, leaving the annotations above orphaned and stacked
// onto createKeyWithMultipleTenants (duplicate @ParameterizedTest does not compile). Restored.
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    createKeyRunner((keyToCreate) -> {
        KeyVaultKey createdKey = keyClient.createKey(keyToCreate);

        assertKeyEquals(keyToCreate, createdKey);
        // Mirrors the HSM-platform presence check done in getKey().
        assertNotNull(createdKey.getProperties().getHsmPlatform());
    });
}

/**
 * Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
 * provided in the authentication challenge.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion, testResourceNamer.randomUuid());

    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));

    // Clear cached credential state so the second create re-runs the authentication challenge.
    KeyVaultCredentialPolicy.clearCache();

    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
}
/**
 * Verifies that an RSA key can be created in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    createRsaKeyRunner((rsaKeyOptions) -> {
        KeyVaultKey createdKey = keyClient.createRsaKey(rsaKeyOptions);

        assertKeyEquals(rsaKeyOptions, createdKey);
    });
}

/**
 * Verifies that creating a key with an empty-string name fails with HTTP 400.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    // HSM-backed key type when running against Managed HSM, software-backed otherwise.
    KeyType keyType;

    if (runManagedHsmTest) {
        keyType = KeyType.RSA_HSM;
    } else {
        keyType = KeyType.RSA;
    }

    assertRestException(() -> keyClient.createKey("", keyType),
        ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}

/**
 * Verifies that creating a key without a key type fails with HTTP 400.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    // The runner supplies options with an empty value - presumably a null key type; the service rejects it.
    createKeyEmptyValueRunner((keyOptions) ->
        assertRestException(() -> keyClient.createKey(keyOptions.getName(), keyOptions.getKeyType()),
            ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}

/**
 * Verifies that passing a null options object to createKey throws a {@link NullPointerException}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertThrows(NullPointerException.class, () -> keyClient.createKey(null));
}
/**
 * Tests that a key is able to be updated when it exists.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    updateKeyRunner((originalKeyOptions, updatedKeyOptions) -> {
        KeyVaultKey createdKey = keyClient.createKey(originalKeyOptions);

        assertKeyEquals(originalKeyOptions, createdKey);

        // Only the expiry is changed; the updated key is compared against the expected options.
        KeyVaultKey updatedKey =
            keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updatedKeyOptions.getExpiresOn()));

        assertKeyEquals(updatedKeyOptions, updatedKey);
    });
}

/**
 * Tests that a key can be updated when it is disabled.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    updateDisabledKeyRunner((createKeyOptions, updateKeyOptions) -> {
        KeyVaultKey createdKey = keyClient.createKey(createKeyOptions);

        assertKeyEquals(createKeyOptions, createdKey);

        KeyVaultKey updatedKey =
            keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updateKeyOptions.getExpiresOn()));

        assertKeyEquals(updateKeyOptions, updatedKey);
    });
}

/**
 * Tests that an existing key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    getKeyRunner((keyToSetAndGet) -> {
        keyClient.createKey(keyToSetAndGet);

        KeyVaultKey retrievedKey = keyClient.getKey(keyToSetAndGet.getName());

        assertKeyEquals(keyToSetAndGet, retrievedKey);
        // The service reports which HSM platform (if any) backs the key.
        assertNotNull(retrievedKey.getProperties().getHsmPlatform());
    });
}
/**
 * Tests that a specific version of the key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
        // Two creates under the same name produce two versions of one key.
        KeyVaultKey keyVersionOne = keyClient.createKey(keyWithOriginalValue);
        KeyVaultKey keyVersionTwo = keyClient.createKey(keyWithNewValue);

        // Fetching by explicit version must return the matching payload for each.
        assertKeyEquals(keyWithOriginalValue,
            keyClient.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
        assertKeyEquals(keyWithNewValue,
            keyClient.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
    });
}

/**
 * Tests that an attempt to get a non-existing key throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertRestException(() -> keyClient.getKey("non-existing"),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}

/**
 * Tests that an existing key can be deleted.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    deleteKeyRunner((keyToDelete) -> {
        // Live-only wait before creating/deleting; no-op in playback.
        sleepIfRunningAgainstService(30000);

        assertKeyEquals(keyToDelete, keyClient.createKey(keyToDelete));

        SyncPoller<DeletedKey, Void> deletedKeyPoller = setPlaybackSyncPollerPollInterval(
            keyClient.beginDeleteKey(keyToDelete.getName()));

        DeletedKey deletedKey = deletedKeyPoller.waitForCompletion().getValue();

        // Deletion metadata must be populated on the soft-deleted key.
        assertNotNull(deletedKey.getDeletedOn());
        assertNotNull(deletedKey.getRecoveryId());
        assertNotNull(deletedKey.getScheduledPurgeDate());
        assertEquals(keyToDelete.getName(), deletedKey.getName());
    });
}

@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertRestException(() -> keyClient.beginDeleteKey("non-existing"),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}

/**
 * Tests that an attempt to retrieve a non-existing deleted key throws an error on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertRestException(() -> keyClient.getDeletedKey("non-existing"),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Verifies that a soft-deleted key can be recovered and keeps its name and validity window.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    recoverDeletedKeyRunner((keyToRecover) -> {
        assertKeyEquals(keyToRecover, keyClient.createKey(keyToRecover));

        // Soft-delete the key and wait for the operation to finish.
        SyncPoller<DeletedKey, Void> deletePoller = setPlaybackSyncPollerPollInterval(
            keyClient.beginDeleteKey(keyToRecover.getName()));

        assertNotNull(deletePoller.waitForCompletion());

        // Recover it and verify identity and validity window survived.
        SyncPoller<KeyVaultKey, Void> recoverPoller = setPlaybackSyncPollerPollInterval(
            keyClient.beginRecoverDeletedKey(keyToRecover.getName()));

        KeyVaultKey recoveredKey = recoverPoller.waitForCompletion().getValue();

        assertEquals(keyToRecover.getName(), recoveredKey.getName());
        assertEquals(keyToRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
        assertEquals(keyToRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
    });
}

/**
 * Verifies that recovering a deleted key that does not exist fails with HTTP 404.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertRestException(() -> keyClient.beginRecoverDeletedKey("non-existing"),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}

/**
 * Verifies that a key can be backed up and that the backup blob is non-empty.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    backupKeyRunner((keyToBackup) -> {
        assertKeyEquals(keyToBackup, keyClient.createKey(keyToBackup));

        byte[] backupBytes = keyClient.backupKey(keyToBackup.getName());

        assertNotNull(backupBytes);
        assertTrue(backupBytes.length > 0);
    });
}

/**
 * Verifies that backing up a key that does not exist fails with HTTP 404.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    assertRestException(() -> keyClient.backupKey("non-existing"),
        ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that a key can be restored from its backup blob after being deleted and purged.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    restoreKeyRunner((keyToBackupAndRestore) -> {
        assertKeyEquals(keyToBackupAndRestore, keyClient.createKey(keyToBackupAndRestore));

        // Back the key up before it is deleted.
        byte[] backupBytes = (keyClient.backupKey(keyToBackupAndRestore.getName()));

        assertNotNull(backupBytes);
        assertTrue(backupBytes.length > 0);

        // The key must be fully deleted and purged before a restore is possible.
        SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
            keyClient.beginDeleteKey(keyToBackupAndRestore.getName()));

        poller.waitForCompletion();

        keyClient.purgeDeletedKey(keyToBackupAndRestore.getName());

        // Wait until the purge has taken effect, then a little longer (live runs only).
        pollOnKeyPurge(keyToBackupAndRestore.getName());

        sleepIfRunningAgainstService(60000);

        KeyVaultKey restoredKey = keyClient.restoreKeyBackup(backupBytes);

        assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
        assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
    });
}

/**
 * Tests that an attempt to restore a key from malformed backup bytes throws an error.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    // Arbitrary bytes that cannot be a valid backup blob.
    byte[] keyBackupBytes = "non-existing".getBytes();

    assertRestException(() -> keyClient.restoreKeyBackup(keyBackupBytes),
        ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
 * Tests that keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    listKeysRunner((keysToList) -> {
        // Create every expected key first.
        keysToList.values().forEach(key -> assertKeyEquals(key, keyClient.createKey(key)));

        sleepIfRunningAgainstService(5000);

        // Remove each listed key from the expected map; an empty map afterwards means all were found.
        for (KeyProperties actualKey : keyClient.listPropertiesOfKeys()) {
            if (keysToList.containsKey(actualKey.getName())) {
                CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());

                assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
                assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());

                keysToList.remove(actualKey.getName());
            }
        }

        assertEquals(0, keysToList.size());
    });
}
/**
 * Tests that a deleted key can be retrieved on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    getDeletedKeyRunner((keyToDeleteAndGet) -> {
        assertKeyEquals(keyToDeleteAndGet, keyClient.createKey(keyToDeleteAndGet));
        // Delete the key and wait for the operation to complete before fetching the deleted entity.
        SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
            keyClient.beginDeleteKey(keyToDeleteAndGet.getName()));
        poller.waitForCompletion();
        sleepIfRunningAgainstService(30000);
        DeletedKey deletedKey = keyClient.getDeletedKey(keyToDeleteAndGet.getName());
        // A deleted key carries deletion metadata alongside the original name.
        assertNotNull(deletedKey.getDeletedOn());
        assertNotNull(deletedKey.getRecoveryId());
        assertNotNull(deletedKey.getScheduledPurgeDate());
        assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
    });
}
/**
 * Tests that deleted keys can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // NOTE(review): skipped entirely in live mode — presumably because of the 90s propagation wait below; confirm.
    if (interceptorManager.isLiveMode()) {
        return;
    }
    listDeletedKeysRunner((keysToList) -> {
        for (CreateKeyOptions key : keysToList.values()) {
            assertKeyEquals(key, keyClient.createKey(key));
        }
        // Delete every created key so it shows up in the deleted-keys listing.
        for (CreateKeyOptions key : keysToList.values()) {
            SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
                keyClient.beginDeleteKey(key.getName()));
            poller.waitForCompletion();
        }
        sleepIfRunningAgainstService(90000);
        Iterable<DeletedKey> deletedKeys = keyClient.listDeletedKeys();
        // Only deletion metadata is asserted for each listed entry; names are not matched back to keysToList.
        for (DeletedKey deletedKey : deletedKeys) {
            assertNotNull(deletedKey.getDeletedOn());
            assertNotNull(deletedKey.getRecoveryId());
        }
    });
}
/**
 * Tests that key versions can be listed in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    listKeyVersionsRunner((keysToList) -> {
        String keyName = null;

        // Every createKey() call with the same name produces a new version of that key.
        for (CreateKeyOptions key : keysToList) {
            keyName = key.getName();

            sleepIfRunningAgainstService(4000);
            assertKeyEquals(key, keyClient.createKey(key));
        }

        List<KeyProperties> keyVersions = new ArrayList<>();

        keyClient.listPropertiesOfKeyVersions(keyName).forEach(keyVersions::add);

        assertEquals(keysToList.size(), keyVersions.size());
    });
}
/**
 * Tests that an existing key can be released.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Only runs against Managed HSM and when release-key testing is explicitly enabled.
    Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
    createKeyClient(httpClient, serviceVersion);
    releaseKeyRunner((keyToRelease, attestationUrl) -> {
        assertKeyEquals(keyToRelease, keyClient.createRsaKey(keyToRelease));
        // A canned token suffices in playback; otherwise a real token is fetched from the attestation service.
        String targetAttestationToken = "testAttestationToken";
        if (getTestMode() != TestMode.PLAYBACK) {
            if (!attestationUrl.endsWith("/")) {
                attestationUrl = attestationUrl + "/";
            }
            targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
        }
        ReleaseKeyResult releaseKeyResult = keyClient.releaseKey(keyToRelease.getName(), targetAttestationToken);
        assertNotNull(releaseKeyResult.getValue());
    });
}
/**
 * Tests that fetching the key rotation policy of a non-existent key throws.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation-policy tests run only against standard vaults, not Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("nonExistentKey", 20);
    assertThrows(ResourceNotFoundException.class, () -> keyClient.getKeyRotationPolicy(keyName));
}
/**
 * Tests that fetching the rotation policy of a key with no explicitly-set policy returns the service defaults.
 * (Previous Javadoc was a copy-paste of the non-existent-key test's description.)
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation-policy tests run only against standard vaults, not Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
    KeyRotationPolicy keyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
    // With no policy ever set, the identifying/timestamp fields come back null...
    assertNotNull(keyRotationPolicy);
    assertNull(keyRotationPolicy.getId());
    assertNull(keyRotationPolicy.getCreatedOn());
    assertNull(keyRotationPolicy.getUpdatedOn());
    assertNull(keyRotationPolicy.getExpiresIn());
    // ...and a single NOTIFY action at 30 days ("P30D") before expiry is the asserted default.
    assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
    assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
    assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
    assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
 * Tests that a key rotation policy can be updated with all possible properties, then retrieves it.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation-policy tests run only against standard vaults, not Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyClient(httpClient, serviceVersion);
    updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
        keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
        // The policy echoed back by the update call must match what a subsequent read returns.
        KeyRotationPolicy updatedKeyRotationPolicy =
            keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
        KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
        assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
    });
}
/**
 * Tests that a key can be rotated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Rotation tests run only against standard vaults, not Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    KeyVaultKey createdKey = keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
    KeyVaultKey rotatedKey = keyClient.rotateKey(keyName);
    // Rotation produces a new version but preserves the key's name and tags.
    assertEquals(createdKey.getName(), rotatedKey.getName());
    assertEquals(createdKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // No key named "myKey" is created beforehand; only client construction is asserted here.
    CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey");
    assertNotNull(cryptographyClient);
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}, and that
 * cryptographic operations can be performed with said cryptography client.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    createKeyRunner((keyToCreate) -> {
        assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate));

        CryptographyClient cryptographyClient = keyClient.getCryptographyClient(keyToCreate.getName());

        assertNotNull(cryptographyClient);

        // A full encrypt/decrypt round-trip must reproduce the original plaintext.
        byte[] plaintext = "myPlaintext".getBytes();
        byte[] ciphertext = cryptographyClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext).getCipherText();
        byte[] decryptedText = cryptographyClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext).getPlainText();

        assertArrayEquals(plaintext, decryptedText);
    });
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key and version using a {@link KeyClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // Only client construction with an explicit version string is asserted; no service call is verified here.
    CryptographyClient cryptographyClient =
        keyClient.getCryptographyClient("myKey", "6A385B124DEF4096AF1361A85B16C204");
    assertNotNull(cryptographyClient);
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // An empty version string must still yield a usable client object.
    CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey", "");
    assertNotNull(cryptographyClient);
}
/**
 * Tests that a {@link CryptographyClient} can be created for a given key using a {@link KeyClient}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // A null version must still yield a usable client object.
    CryptographyClient cryptographyClient = keyClient.getCryptographyClient("myKey", null);
    assertNotNull(cryptographyClient);
}
/**
 * Polls until the deleted key is no longer retrievable (i.e. the purge completed), giving up after
 * ten attempts and logging to stderr if the key was still present.
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;
        try {
            deletedKey = keyClient.getDeletedKey(keyName);
        } catch (ResourceNotFoundException ignored) {
            // The key is gone - purge completed.
        }
        if (deletedKey == null) {
            return;
        }
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} | class KeyClientTest extends KeyClientTestBase {
// Synchronous client under test; (re)built by createKeyClient() for each parameterized run.
protected KeyClient keyClient;
@Override
protected void beforeTest() {
    beforeTestSetup();
}
// Convenience overload that builds the client without a test tenant ID.
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion, null);
}
// Builds the KeyClient, substituting the playback HTTP client when recordings are being replayed.
protected void createKeyClient(HttpClient httpClient, KeyServiceVersion serviceVersion, String testTenantId) {
    keyClient = getKeyClientBuilder(buildSyncAssertingClient(
        interceptorManager.isPlaybackMode() ? interceptorManager.getPlaybackClient() : httpClient), testTenantId,
        getEndpoint(), serviceVersion)
        .buildClient();
}
/**
 * Tests that a key can be created in the key vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // NOTE(review): the original method body was lost here, leaving two @ParameterizedTest/@MethodSource sets
    // stacked on the next method (a compile error, since neither annotation is repeatable). Restored using the
    // same runner-based pattern as the sibling creation tests; confirm against version control.
    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
}
/**
 * Tests that a key can be created in the key vault while using a different tenant ID than the one that will be
 * provided in the authentication challenge.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyWithMultipleTenants(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion, testResourceNamer.randomUuid());
    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
    // Clear cached credentials so the second run re-authenticates, exercising the challenge for another tenant.
    KeyVaultCredentialPolicy.clearCache();
    createKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate)));
}
/**
 * Tests that an RSA key is created.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createRsaKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // The created key must round-trip the options supplied by the runner.
    createRsaKeyRunner((keyToCreate) -> assertKeyEquals(keyToCreate, keyClient.createRsaKey(keyToCreate)));
}
/**
* Tests that an attempt to create a key with empty string name throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyEmptyName(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
KeyType keyType = runManagedHsmTest ? KeyType.RSA_HSM : KeyType.RSA;
assertRestException(() -> keyClient.createKey("", keyType), ResourceModifiedException.class,
HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that we cannot create keys when key type is null.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNullType(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
createKeyEmptyValueRunner((keyToCreate) ->
assertRestException(() -> keyClient.createKey(keyToCreate.getName(), keyToCreate.getKeyType()),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST));
}
/**
* Verifies that an exception is thrown when null key object is passed for creation.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void createKeyNull(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertThrows(NullPointerException.class, () -> keyClient.createKey(null));
}
/**
 * Tests that an existing key's properties can be updated.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);

    updateKeyRunner((originalKeyOptions, updatedKeyOptions) -> {
        KeyVaultKey createdKey = keyClient.createKey(originalKeyOptions);

        assertKeyEquals(originalKeyOptions, createdKey);

        // Push the new expiry time and verify the service reflects it in the returned key.
        KeyProperties propertiesToUpdate =
            createdKey.getProperties().setExpiresOn(updatedKeyOptions.getExpiresOn());
        KeyVaultKey updatedKey = keyClient.updateKeyProperties(propertiesToUpdate);

        assertKeyEquals(updatedKeyOptions, updatedKey);
    });
}
/**
* Tests that a key can be updated when it is disabled.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void updateDisabledKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
updateDisabledKeyRunner((createKeyOptions, updateKeyOptions) -> {
KeyVaultKey createdKey = keyClient.createKey(createKeyOptions);
assertKeyEquals(createKeyOptions, createdKey);
KeyVaultKey updatedKey =
keyClient.updateKeyProperties(createdKey.getProperties().setExpiresOn(updateKeyOptions.getExpiresOn()));
assertKeyEquals(updateKeyOptions, updatedKey);
});
}
/**
 * Tests that an existing key can be retrieved.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    getKeyRunner((keyToSetAndGet) -> {
        keyClient.createKey(keyToSetAndGet);
        KeyVaultKey retrievedKey = keyClient.getKey(keyToSetAndGet.getName());
        assertKeyEquals(keyToSetAndGet, retrievedKey);
        // "0" hsmPlatform is asserted here for vault keys — NOTE(review): confirm this holds across service versions.
        assertEquals("0", retrievedKey.getProperties().getHsmPlatform());
    });
}
/**
* Tests that a specific version of the key can be retrieved.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeySpecificVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getKeySpecificVersionRunner((keyWithOriginalValue, keyWithNewValue) -> {
KeyVaultKey keyVersionOne = keyClient.createKey(keyWithOriginalValue);
KeyVaultKey keyVersionTwo = keyClient.createKey(keyWithNewValue);
assertKeyEquals(keyWithOriginalValue,
keyClient.getKey(keyVersionOne.getName(), keyVersionOne.getProperties().getVersion()));
assertKeyEquals(keyWithNewValue,
keyClient.getKey(keyVersionTwo.getName(), keyVersionTwo.getProperties().getVersion()));
});
}
/**
* Tests that an attempt to get a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that an existing key can be deleted.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    deleteKeyRunner((keyToDelete) -> {
        // NOTE(review): this sleep runs BEFORE the key is created — looks like it waits out residual state from
        // earlier tests rather than deletion propagation; confirm intent.
        sleepIfRunningAgainstService(30000);
        assertKeyEquals(keyToDelete, keyClient.createKey(keyToDelete));
        SyncPoller<DeletedKey, Void> deletedKeyPoller = setPlaybackSyncPollerPollInterval(
            keyClient.beginDeleteKey(keyToDelete.getName()));
        DeletedKey deletedKey = deletedKeyPoller.waitForCompletion().getValue();
        // A deleted key carries deletion metadata alongside the original name.
        assertNotNull(deletedKey.getDeletedOn());
        assertNotNull(deletedKey.getRecoveryId());
        assertNotNull(deletedKey.getScheduledPurgeDate());
        assertEquals(keyToDelete.getName(), deletedKey.getName());
    });
}
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void deleteKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginDeleteKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that an attempt to retrieve a non-existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.getDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
 * Tests that a deleted key can be recovered on a soft-delete enabled vault.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    recoverDeletedKeyRunner((keyToDeleteAndRecover) -> {
        assertKeyEquals(keyToDeleteAndRecover, keyClient.createKey(keyToDeleteAndRecover));
        // Delete the key, then begin recovery once the delete operation has completed.
        SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
            keyClient.beginDeleteKey(keyToDeleteAndRecover.getName()));
        assertNotNull(poller.waitForCompletion());
        SyncPoller<KeyVaultKey, Void> recoverPoller = setPlaybackSyncPollerPollInterval(
            keyClient.beginRecoverDeletedKey(keyToDeleteAndRecover.getName()));
        KeyVaultKey recoveredKey = recoverPoller.waitForCompletion().getValue();
        // The recovered key must retain its name and validity window.
        assertEquals(keyToDeleteAndRecover.getName(), recoveredKey.getName());
        assertEquals(keyToDeleteAndRecover.getNotBefore(), recoveredKey.getProperties().getNotBefore());
        assertEquals(keyToDeleteAndRecover.getExpiresOn(), recoveredKey.getProperties().getExpiresOn());
    });
}
/**
* Tests that an attempt to recover a non existing deleted key throws an error on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void recoverDeletedKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.beginRecoverDeletedKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
backupKeyRunner((keyToBackup) -> {
assertKeyEquals(keyToBackup, keyClient.createKey(keyToBackup));
byte[] backupBytes = (keyClient.backupKey(keyToBackup.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
});
}
/**
* Tests that an attempt to back up a non-existing key throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void backupKeyNotFound(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
assertRestException(() -> keyClient.backupKey("non-existing"),
ResourceNotFoundException.class, HttpURLConnection.HTTP_NOT_FOUND);
}
/**
* Tests that a key can be backed up in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
restoreKeyRunner((keyToBackupAndRestore) -> {
assertKeyEquals(keyToBackupAndRestore, keyClient.createKey(keyToBackupAndRestore));
byte[] backupBytes = (keyClient.backupKey(keyToBackupAndRestore.getName()));
assertNotNull(backupBytes);
assertTrue(backupBytes.length > 0);
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToBackupAndRestore.getName()));
poller.waitForCompletion();
keyClient.purgeDeletedKey(keyToBackupAndRestore.getName());
pollOnKeyPurge(keyToBackupAndRestore.getName());
sleepIfRunningAgainstService(60000);
KeyVaultKey restoredKey = keyClient.restoreKeyBackup(backupBytes);
assertEquals(keyToBackupAndRestore.getName(), restoredKey.getName());
assertEquals(keyToBackupAndRestore.getExpiresOn(), restoredKey.getProperties().getExpiresOn());
});
}
/**
* Tests that an attempt to restore a key from malformed backup bytes throws an error.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void restoreKeyFromMalformedBackup(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
byte[] keyBackupBytes = "non-existing".getBytes();
assertRestException(() -> keyClient.restoreKeyBackup(keyBackupBytes),
ResourceModifiedException.class, HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
* Tests that keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
sleepIfRunningAgainstService(5000);
for (KeyProperties actualKey : keyClient.listPropertiesOfKeys()) {
if (keysToList.containsKey(actualKey.getName())) {
CreateKeyOptions expectedKey = keysToList.get(actualKey.getName());
assertEquals(expectedKey.getExpiresOn(), actualKey.getExpiresOn());
assertEquals(expectedKey.getNotBefore(), actualKey.getNotBefore());
keysToList.remove(actualKey.getName());
}
}
assertEquals(0, keysToList.size());
});
}
/**
* Tests that a deleted key can be retrieved on a soft-delete enabled vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getDeletedKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
getDeletedKeyRunner((keyToDeleteAndGet) -> {
assertKeyEquals(keyToDeleteAndGet, keyClient.createKey(keyToDeleteAndGet));
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(keyToDeleteAndGet.getName()));
poller.waitForCompletion();
sleepIfRunningAgainstService(30000);
DeletedKey deletedKey = keyClient.getDeletedKey(keyToDeleteAndGet.getName());
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
assertNotNull(deletedKey.getScheduledPurgeDate());
assertEquals(keyToDeleteAndGet.getName(), deletedKey.getName());
});
}
/**
* Tests that deleted keys can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listDeletedKeys(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
if (interceptorManager.isLiveMode()) {
return;
}
listDeletedKeysRunner((keysToList) -> {
for (CreateKeyOptions key : keysToList.values()) {
assertKeyEquals(key, keyClient.createKey(key));
}
for (CreateKeyOptions key : keysToList.values()) {
SyncPoller<DeletedKey, Void> poller = setPlaybackSyncPollerPollInterval(
keyClient.beginDeleteKey(key.getName()));
poller.waitForCompletion();
}
sleepIfRunningAgainstService(90000);
Iterable<DeletedKey> deletedKeys = keyClient.listDeletedKeys();
for (DeletedKey deletedKey : deletedKeys) {
assertNotNull(deletedKey.getDeletedOn());
assertNotNull(deletedKey.getRecoveryId());
}
});
}
/**
* Tests that key versions can be listed in the key vault.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void listKeyVersions(HttpClient httpClient, KeyServiceVersion serviceVersion) {
createKeyClient(httpClient, serviceVersion);
listKeyVersionsRunner((keysToList) -> {
String keyName = null;
for (CreateKeyOptions key : keysToList) {
keyName = key.getName();
sleepIfRunningAgainstService(4000);
assertKeyEquals(key, keyClient.createKey(key));
}
Iterable<KeyProperties> keyVersionsOutput = keyClient.listPropertiesOfKeyVersions(keyName);
List<KeyProperties> keyVersionsList = new ArrayList<>();
keyVersionsOutput.forEach(keyVersionsList::add);
assertEquals(keysToList.size(), keyVersionsList.size());
});
}
/**
* Tests that an existing key can be released.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void releaseKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(runManagedHsmTest && runReleaseKeyTest);
createKeyClient(httpClient, serviceVersion);
releaseKeyRunner((keyToRelease, attestationUrl) -> {
assertKeyEquals(keyToRelease, keyClient.createRsaKey(keyToRelease));
String targetAttestationToken = "testAttestationToken";
if (getTestMode() != TestMode.PLAYBACK) {
if (!attestationUrl.endsWith("/")) {
attestationUrl = attestationUrl + "/";
}
targetAttestationToken = getAttestationToken(attestationUrl + "generate-test-token");
}
ReleaseKeyResult releaseKeyResult = keyClient.releaseKey(keyToRelease.getName(), targetAttestationToken);
assertNotNull(releaseKeyResult.getValue());
});
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyOfNonExistentKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("nonExistentKey", 20);
assertThrows(ResourceNotFoundException.class, () -> keyClient.getKeyRotationPolicy(keyName));
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void getKeyRotationPolicyWithNoPolicySet(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
String keyName = testResourceNamer.randomName("rotateKey", 20);
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy keyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertNotNull(keyRotationPolicy);
assertNull(keyRotationPolicy.getId());
assertNull(keyRotationPolicy.getCreatedOn());
assertNull(keyRotationPolicy.getUpdatedOn());
assertNull(keyRotationPolicy.getExpiresIn());
assertEquals(1, keyRotationPolicy.getLifetimeActions().size());
assertEquals(KeyRotationPolicyAction.NOTIFY, keyRotationPolicy.getLifetimeActions().get(0).getAction());
assertEquals("P30D", keyRotationPolicy.getLifetimeActions().get(0).getTimeBeforeExpiry());
assertNull(keyRotationPolicy.getLifetimeActions().get(0).getTimeAfterCreate());
}
/**
* Tests that fetching the key rotation policy of a non-existent key throws.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@Disabled("Disable after https:
public void updateGetKeyRotationPolicyWithMinimumProperties(HttpClient httpClient,
KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithMinimumPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
* Tests that an key rotation policy can be updated with all possible properties, then retrieves it.
*/
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void updateGetKeyRotationPolicyWithAllProperties(HttpClient httpClient, KeyServiceVersion serviceVersion) {
Assumptions.assumeTrue(!isHsmEnabled);
createKeyClient(httpClient, serviceVersion);
updateGetKeyRotationPolicyWithAllPropertiesRunner((keyName, keyRotationPolicy) -> {
keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
KeyRotationPolicy updatedKeyRotationPolicy =
keyClient.updateKeyRotationPolicy(keyName, keyRotationPolicy);
KeyRotationPolicy retrievedKeyRotationPolicy = keyClient.getKeyRotationPolicy(keyName);
assertKeyVaultRotationPolicyEquals(updatedKeyRotationPolicy, retrievedKeyRotationPolicy);
});
}
/**
 * Verifies that rotating a key succeeds and that the rotated key keeps the original key's
 * name and tags.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
@DisabledIfSystemProperty(named = "IS_SKIP_ROTATION_POLICY_TEST", matches = "true")
public void rotateKey(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    // Skip when running against Managed HSM.
    Assumptions.assumeTrue(!isHsmEnabled);
    createKeyClient(httpClient, serviceVersion);
    String keyName = testResourceNamer.randomName("rotateKey", 20);
    KeyVaultKey originalKey = keyClient.createRsaKey(new CreateRsaKeyOptions(keyName));
    KeyVaultKey rotatedKey = keyClient.rotateKey(keyName);
    // Rotation must preserve the key's identity and metadata.
    assertEquals(originalKey.getName(), rotatedKey.getName());
    assertEquals(originalKey.getProperties().getTags(), rotatedKey.getProperties().getTags());
}
/**
 * Verifies that a {@link CryptographyClient} can be obtained from a {@link KeyClient} for a
 * given key name.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClient(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // Client creation is local — no service call is required for this check.
    assertNotNull(keyClient.getCryptographyClient("myKey"));
}
/**
 * Verifies that a {@link CryptographyClient} obtained from a {@link KeyClient} can round-trip
 * data through encrypt/decrypt using RSA-OAEP.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientAndEncryptDecrypt(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    createKeyRunner((keyToCreate) -> {
        assertKeyEquals(keyToCreate, keyClient.createKey(keyToCreate));
        CryptographyClient cryptographyClient = keyClient.getCryptographyClient(keyToCreate.getName());
        assertNotNull(cryptographyClient);
        byte[] plaintext = "myPlaintext".getBytes();
        byte[] ciphertext = cryptographyClient.encrypt(EncryptionAlgorithm.RSA_OAEP, plaintext).getCipherText();
        byte[] roundTripped = cryptographyClient.decrypt(EncryptionAlgorithm.RSA_OAEP, ciphertext).getPlainText();
        // Decrypting the ciphertext must yield the original plaintext bytes.
        assertArrayEquals(plaintext, roundTripped);
    });
}
/**
 * Verifies that a {@link CryptographyClient} can be obtained from a {@link KeyClient} for a
 * specific key version.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    // Client creation is local — no service call is required for this check.
    assertNotNull(keyClient.getCryptographyClient("myKey", "6A385B124DEF4096AF1361A85B16C204"));
}
/**
 * Verifies that a {@link CryptographyClient} can be obtained from a {@link KeyClient} when the
 * key version is the empty string.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithEmptyKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    assertNotNull(keyClient.getCryptographyClient("myKey", ""));
}
/**
 * Verifies that a {@link CryptographyClient} can be obtained from a {@link KeyClient} when the
 * key version is {@code null}.
 */
@ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS)
@MethodSource("getTestParameters")
public void getCryptographyClientWithNullKeyVersion(HttpClient httpClient, KeyServiceVersion serviceVersion) {
    createKeyClient(httpClient, serviceVersion);
    assertNotNull(keyClient.getCryptographyClient("myKey", null));
}
/**
 * Polls until a deleted key has been purged, giving up after ten attempts.
 * Each attempt that still finds the deleted key sleeps for 2 seconds (live runs only).
 */
private void pollOnKeyPurge(String keyName) {
    for (int attempt = 0; attempt < 10; attempt++) {
        DeletedKey deletedKey = null;
        try {
            deletedKey = keyClient.getDeletedKey(keyName);
        } catch (ResourceNotFoundException ignored) {
            // Not found means the purge completed; deletedKey stays null.
        }
        if (deletedKey == null) {
            return;
        }
        sleepIfRunningAgainstService(2000);
    }
    System.err.printf("Deleted Key %s was not purged \n", keyName);
}
} |
// NOTE(review): this region is corrupted by dataset/diff extraction — a reviewer question, a
// target snippet ("connection.setConnectTimeout(1000);") and two identical snapshots of
// checkIMDSAvailable are concatenated with '|' separators. The lines are kept byte-identical;
// only comments are added. Restore a single clean copy of the method from source control.
Is this something that should be configurable? That way we can have a default and an escape hatch if needed. There are also other locations where we're using `HttpURLConnection` in this class, should those also have a connection timeout? | private Mono<Boolean> checkIMDSAvailable(String endpoint) {
    // Probes the IMDS endpoint with a plain GET; any failure (timeout, refused connection, DNS)
    // is mapped to CredentialUnavailableException so the credential chain can fall through.
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        URL url = getUrl(endpoint + "?api-version=2018-02-01");
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            // Hard-coded 1 s connect timeout — per the review note above, consider making this
            // configurable via IdentityClientOptions; TODO confirm desired default.
            connection.setConnectTimeout(1000);
            connection.connect();
        } catch (Exception e) {
            throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException(
                    "ManagedIdentityCredential authentication unavailable. "
                        + "Connection to IMDS endpoint cannot be established, "
                        + e.getMessage() + ".", e));
        } finally {
            // Always release the connection, even when the probe fails.
            if (connection != null) {
                connection.disconnect();
            }
        }
        return true;
    });
} | connection.setConnectTimeout(1000); | private Mono<Boolean> checkIMDSAvailable(String endpoint) {
    // Second snapshot of the same method (post-change copy from the dataset row); identical to
    // the one above.
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        URL url = getUrl(endpoint + "?api-version=2018-02-01");
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setConnectTimeout(1000);
            connection.connect();
        } catch (Exception e) {
            throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException(
                    "ManagedIdentityCredential authentication unavailable. "
                        + "Connection to IMDS endpoint cannot be established, "
                        + e.getMessage() + ".", e));
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
        return true;
    });
} | class IdentityClient extends IdentityClientBase {
// Supplier-backed accessor for the MSAL public client application (CAE disabled).
private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor;
// Public client variant built with Continuous Access Evaluation (CAE) enabled.
private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
// Confidential client for service-principal flows (CAE disabled).
private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
// Confidential client variant built with CAE enabled.
private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
// Confidential client configured for managed identity authentication.
private final SynchronizedAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
// Confidential client configured for workload identity authentication.
private final SynchronizedAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
// Client assertion read from clientAssertionFilePath, cached for a bounded duration.
private final SynchronizedAccessor<String> clientAssertionAccessor;
/**
 * Creates an IdentityClient with the given options.
 *
 * @param tenantId the tenant ID of the application.
 * @param clientId the client ID of the application.
 * @param clientSecret the client secret of the application.
 * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
 * @param clientAssertionFilePath the path to the file containing the client assertion to
 * authenticate with.
 * @param resourceId the resource ID of the application
 * @param clientAssertionSupplier the supplier of the client assertion to authenticate with.
 * @param certificate the PKCS12 or PEM certificate of the application.
 * @param certificatePassword the password protecting the PFX certificate.
 * @param isSharedTokenCacheCredential Indicate whether the credential is
 * {@link com.azure.identity.SharedTokenCacheCredential} or not.
 * @param clientAssertionTimeout the timeout to use for the client assertion.
 * @param options the options configuring the client.
 */
IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath,
    String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
    byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
    Duration clientAssertionTimeout, IdentityClientOptions options) {
    super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId,
        clientAssertionSupplier, certificate, certificatePassword, isSharedTokenCacheCredential,
        clientAssertionTimeout, options);
    // CAE-enabled and CAE-disabled MSAL applications are configured differently (see the
    // enableCae flag), so each gets its own lazily-initialized accessor.
    this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() ->
        getPublicClientApplication(isSharedTokenCacheCredential, false));
    this.publicClientApplicationAccessorWithCae = new SynchronizedAccessor<>(() ->
        getPublicClientApplication(isSharedTokenCacheCredential, true));
    this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication(false));
    this.confidentialClientApplicationAccessorWithCae = new SynchronizedAccessor<>(() -> getConfidentialClientApplication(true));
    this.managedIdentityConfidentialClientApplicationAccessor =
        new SynchronizedAccessor<>(this::getManagedIdentityConfidentialClientApplication);
    this.workloadIdentityConfidentialClientApplicationAccessor =
        new SynchronizedAccessor<>(this::getWorkloadIdentityConfidentialClientApplication);
    // Default the client-assertion cache window to 5 minutes when no timeout is supplied.
    Duration cacheTimeout = (clientAssertionTimeout == null) ? Duration.ofMinutes(5) : clientAssertionTimeout;
    this.clientAssertionAccessor = new SynchronizedAccessor<>(this::parseClientAssertion, cacheTimeout);
}
/**
 * Lazily builds the confidential client on subscription, surfacing construction failures as
 * {@code Mono} errors rather than thrown exceptions.
 */
private Mono<ConfidentialClientApplication> getConfidentialClientApplication(boolean enableCae) {
    return Mono.defer(() -> {
        ConfidentialClientApplication client;
        try {
            client = this.getConfidentialClient(enableCae);
        } catch (RuntimeException e) {
            return Mono.error(e);
        }
        return Mono.just(client);
    });
}
/**
 * Lazily builds the managed-identity confidential client on subscription, surfacing
 * construction failures as {@code Mono} errors.
 */
private Mono<ConfidentialClientApplication> getManagedIdentityConfidentialClientApplication() {
    return Mono.defer(() -> {
        ConfidentialClientApplication client;
        try {
            client = super.getManagedIdentityConfidentialClient();
        } catch (RuntimeException e) {
            return Mono.error(e);
        }
        return Mono.just(client);
    });
}
/**
 * Lazily builds the workload-identity confidential client on subscription, surfacing
 * construction failures as {@code Mono} errors.
 */
private Mono<ConfidentialClientApplication> getWorkloadIdentityConfidentialClientApplication() {
    return Mono.defer(() -> {
        ConfidentialClientApplication client;
        try {
            client = super.getWorkloadIdentityConfidentialClient();
        } catch (RuntimeException e) {
            return Mono.error(e);
        }
        return Mono.just(client);
    });
}
/**
 * Routes a token request to the endpoint-specific flow for the managed identity environment
 * configured in the client options.
 */
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    ManagedIdentityParameters parameters = options.getManagedIdentityParameters();
    // Dispatch on the detected hosting environment; unknown environments fail as unavailable.
    switch (options.getManagedIdentityType()) {
        case APP_SERVICE:
            return authenticateToManagedIdentityEndpoint(parameters.getIdentityEndpoint(),
                parameters.getIdentityHeader(), parameters.getMsiEndpoint(), parameters.getMsiSecret(),
                tokenRequestContext);
        case SERVICE_FABRIC:
            return authenticateToServiceFabricManagedIdentityEndpoint(parameters.getIdentityEndpoint(),
                parameters.getIdentityHeader(), parameters.getIdentityServerThumbprint(), tokenRequestContext);
        case ARC:
            return authenticateToArcManagedIdentityEndpoint(parameters.getIdentityEndpoint(), tokenRequestContext);
        case AKS:
            return authenticateWithExchangeToken(tokenRequestContext);
        case VM:
            return authenticateToIMDSEndpoint(tokenRequestContext);
        default:
            return Mono.error(LOGGER.logExceptionAsError(
                new CredentialUnavailableException("Unknown Managed Identity type, authentication not available.")));
    }
}
/**
 * Reads the client assertion from {@code clientAssertionFilePath} as UTF-8 text.
 * Fails with {@link IllegalStateException} when no file path was configured.
 */
private Mono<String> parseClientAssertion() {
    return Mono.fromCallable(() -> {
        // Guard clause: a file path is mandatory for file-based client assertions.
        if (clientAssertionFilePath == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "Client Assertion File Path is not provided."
                    + " It should be provided to authenticate with client assertion."
            ));
        }
        byte[] fileContents = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(fileContents, StandardCharsets.UTF_8);
    });
}
/**
 * Lazily builds the public client on subscription, surfacing construction failures as
 * {@code Mono} errors.
 */
private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential, boolean enableCae) {
    return Mono.defer(() -> {
        PublicClientApplication client;
        try {
            client = this.getPublicClient(sharedTokenCacheCredential, enableCae);
        } catch (RuntimeException e) {
            return Mono.error(e);
        }
        return Mono.just(client);
    });
}
/**
 * Asynchronously acquires a token using credentials cached by the Azure Tools for IntelliJ
 * plugin. Tries, in order: a refresh token from the shared MSAL cache, then the plugin's own
 * auth details (service principal "SP" or device code "DC").
 *
 * NOTE(review): several message literals below are truncated (URLs cut off by extraction) and
 * will not compile as-is — restore them from source control.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an {@link MsalToken}
 */
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
    try {
        IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
        // Fast path: a refresh token already present in the shared identity MSAL cache.
        String cachedRefreshToken = cacheAccessor.getIntelliJCredentialsFromIdentityMsalCache();
        if (!CoreUtils.isNullOrEmpty(cachedRefreshToken)) {
            RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), cachedRefreshToken);
            if (request.getClaims() != null) {
                ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                refreshTokenParametersBuilder.claims(customClaimRequest);
            }
            return publicClientApplicationAccessor.getValue()
                .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                    .map(MsalToken::new));
        }
        // Fall back to the plugin's stored auth details.
        IntelliJAuthMethodDetails authDetails;
        try {
            authDetails = cacheAccessor.getAuthDetailsIfAvailable();
        } catch (CredentialUnavailableException e) {
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
        }
        if (authDetails == null) {
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException("IntelliJ Authentication not available."
                    + " Please log in with Azure Tools for IntelliJ plugin in the IDE."
                    + " Fore more details refer to the troubleshooting guidelines here at"
                    + " https:
        }
        String authType = authDetails.getAuthMethod();
        if ("SP".equalsIgnoreCase(authType)) {
            // Service principal path: build a confidential client from the plugin's stored
            // client id / key / authority.
            Map<String, String> spDetails = cacheAccessor
                .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
            String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
            try {
                ConfidentialClientApplication.Builder applicationBuilder =
                    ConfidentialClientApplication.builder(spDetails.get("client"),
                        ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                        .authority(authorityUrl)
                        .instanceDiscovery(options.isInstanceDiscoveryEnabled());
                // Honor a custom HTTP pipeline if configured; otherwise fall back to proxy options.
                if (httpPipelineAdapter != null) {
                    applicationBuilder.httpClient(httpPipelineAdapter);
                } else if (options.getProxyOptions() != null) {
                    applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                }
                if (options.getExecutorService() != null) {
                    applicationBuilder.executorService(options.getExecutorService());
                }
                ConfidentialClientApplication application = applicationBuilder.build();
                return Mono.fromFuture(application.acquireToken(
                    ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                        .build())).map(MsalToken::new);
            } catch (MalformedURLException e) {
                return Mono.error(e);
            }
        } else if ("DC".equalsIgnoreCase(authType)) {
            // Device code path: reuse the plugin's cached refresh token.
            LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
                + " for IntelliJ Plugin.");
            if (isADFSTenant()) {
                LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
                    + " the ADFS tenants are not supported via IntelliJ Authentication currently.");
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJCredential "
                        + "authentication unavailable. ADFS tenant/authorities are not supported.")));
            }
            try {
                JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
                String refreshToken = intelliJCredentials.get("refreshToken").textValue();
                RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                    RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                    refreshTokenParametersBuilder.claims(customClaimRequest);
                }
                return publicClientApplicationAccessor.getValue()
                    .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                        .map(MsalToken::new));
            } catch (CredentialUnavailableException e) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
            }
        } else {
            // Any other auth method recorded by the plugin is unsupported here.
            LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
                + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
                + " of those schemes from Azure Tools for IntelliJ plugin.");
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException("IntelliJ Authentication not available."
                    + " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
        }
    } catch (IOException e) {
        return Mono.error(e);
    }
}
/**
 * Asynchronously acquire a token from Active Directory with Azure CLI.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
    StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
    // The CLI accepts a single resource, so collapse the requested scopes first and validate
    // the result before it is appended to a shell command.
    String scopes = ScopeUtil.scopesToResource(request.getScopes());
    try {
        ScopeUtil.validateScope(scopes);
    } catch (IllegalArgumentException ex) {
        return Mono.error(LOGGER.logExceptionAsError(ex));
    }
    azCommand.append(scopes);
    try {
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        // Only pass --tenant when a non-default tenant was resolved.
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azCommand.append(" --tenant ").append(tenant);
        }
    } catch (ClientAuthenticationException | IllegalArgumentException e) {
        return Mono.error(e);
    }
    try {
        return Mono.just(getTokenFromAzureCLIAuthentication(azCommand));
    } catch (RuntimeException e) {
        // "Unavailable" errors are logged specially so chained credentials can fall through.
        return Mono.error(e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Asynchronously acquire a token from Active Directory with the Azure Developer CLI (azd).
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    // azd requires at least one scope; fail fast with a clear error.
    // (isEmpty() replaces the former "size() == 0" check — idiomatic, same behavior.)
    if (scopes.isEmpty()) {
        return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request")));
    }
    // Validate every scope before it is appended to a shell command.
    for (String scope : scopes) {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            return Mono.error(LOGGER.logExceptionAsError(ex));
        }
    }
    azdCommand.append(String.join(" --scope ", scopes));
    try {
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        // Only pass --tenant-id when a non-default tenant was resolved.
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azdCommand.append(" --tenant-id ").append(tenant);
        }
    } catch (ClientAuthenticationException | IllegalArgumentException e) {
        return Mono.error(e);
    }
    try {
        AccessToken token = getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
        return Mono.just(token);
    } catch (RuntimeException e) {
        // "Unavailable" errors are logged specially so chained credentials can fall through.
        return Mono.error(e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Asynchronously acquire a token from Active Directory with Azure PowerShell.
 *
 * Tries the cross-platform "pwsh" executable first and, on Windows, falls back to legacy
 * "powershell". NOTE(review): the troubleshooting-link literal below is truncated (URL cut off
 * by extraction) — restore it from source control.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) {
    ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER);
    List<CredentialUnavailableException> exceptions = new ArrayList<>(2);
    PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows()
        ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE);
    // The legacy executable only exists on Windows.
    PowershellManager legacyPowerShellManager = Platform.isWindows()
        ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null;
    List<PowershellManager> powershellManagers = new ArrayList<>(2);
    powershellManagers.add(defaultPowerShellManager);
    if (legacyPowerShellManager != null) {
        powershellManagers.add(legacyPowerShellManager);
    }
    // Concurrency of 1 means the managers are tried strictly in order; the first success wins.
    return Flux.fromIterable(powershellManagers)
        .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager)
            .onErrorResume(t -> {
                // Compared by simple class name rather than instanceof — presumably to catch
                // same-named exceptions from a different classloader/package; TODO confirm.
                if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
                    return Mono.error(new ClientAuthenticationException(
                        "Azure Powershell authentication failed. Error Details: " + t.getMessage()
                            + ". To mitigate this issue, please refer to the troubleshooting guidelines here at "
                            + "https:
                        null, t));
                }
                // "Unavailable" errors are collected so they can be merged if all managers fail.
                exceptions.add((CredentialUnavailableException) t);
                return Mono.empty();
            }), 1)
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // All managers failed: merge the collected errors into a single chained message.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                last = new CredentialUnavailableException("Azure PowerShell authentication failed using default"
                    + "powershell(pwsh) with following error: " + current.getMessage()
                    + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)"
                    + " with following error: " + last.getMessage(),
                    last.getCause());
            }
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last)));
        }));
}
/**
 * Asynchronously acquires a token via the On-Behalf-Of (OBO) flow using the configured
 * confidential client.
 *
 * <p>NOTE(review): the previous javadoc said "with Azure PowerShell", which did not match this
 * method — it delegates to the confidential client with OBO flow parameters.</p>
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) {
    // Pick the CAE/non-CAE confidential client matching the request, then run the OBO flow.
    return getConfidentialClientInstance(request).getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(buildOBOFlowParameters(request)))
            .map(MsalToken::new));
}
/**
 * Runs Get-AzAccessToken in a PowerShell session managed by {@code powershellManager} and
 * parses the JSON output into an {@link AccessToken}. The session is always closed via
 * {@code Mono.using}. Fails with {@link CredentialUnavailableException} when the Az.Accounts
 * module is missing or the user is not logged in.
 */
private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request,
    PowershellManager powershellManager) {
    // PowerShell takes a single resource URL, so collapse the scopes and validate first.
    String scope = ScopeUtil.scopesToResource(request.getScopes());
    try {
        ScopeUtil.validateScope(scope);
    } catch (IllegalArgumentException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
    // Mono.using guarantees PowershellManager::close runs when the pipeline terminates.
    return Mono.using(() -> powershellManager, manager -> manager.initSession().flatMap(m -> {
        String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru";
        return m.runCommand(azAccountsCommand).flatMap(output -> {
            // The module requirement is detected by matching PowerShell's error text.
            if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded "
                + "because no valid module file")) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("Az.Account module with version >= 2.2.0 is not installed. "
                        + "It needs to be installed to use Azure PowerShell "
                        + "Credential.")));
            }
            LOGGER.verbose("Az.accounts module was found installed.");
            String command = "Get-AzAccessToken -ResourceUrl '"
                + scope
                + "' | ConvertTo-Json";
            LOGGER.verbose("Azure Powershell Authentication => Executing the command `{}` in Azure "
                + "Powershell to retrieve the Access Token.", command);
            return m.runCommand(command).flatMap(out -> {
                // Not-logged-in is likewise detected by matching the CLI's error text.
                if (out.contains("Run Connect-AzAccount to login")) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "Run Connect-AzAccount to login to Azure account in PowerShell.")));
                }
                try {
                    LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the "
                        + "received response from Azure Powershell.");
                    Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class,
                        SerializerEncoding.JSON);
                    String accessToken = objectMap.get("Token");
                    String time = objectMap.get("ExpiresOn");
                    // Normalize the expiry to UTC.
                    OffsetDateTime expiresOn = OffsetDateTime.parse(time).withOffsetSameInstant(ZoneOffset.UTC);
                    return Mono.just(new AccessToken(accessToken, expiresOn));
                } catch (IOException e) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "Encountered error when deserializing response from Azure Power Shell.", e)));
                }
            });
        });
    }), PowershellManager::close);
}
/**
 * Asynchronously acquire a token from Active Directory with a client secret.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
    // Pick the CAE/non-CAE client matching the request, then run the client-credentials flow.
    return getConfidentialClientInstance(request).getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() ->
            confidentialClient.acquireToken(buildConfidentialClientParameters(request).build())))
        .map(MsalToken::new);
}
/**
 * Returns the confidential client accessor matching the request's CAE setting.
 */
private SynchronizedAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext requestContext) {
    // CAE-enabled requests must use the client built with CAE capabilities.
    if (requestContext.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
/**
 * Builds client-credentials parameters for the request: scopes, resolved tenant, optional
 * caller-supplied assertion, and CAE claims when applicable.
 */
private ClientCredentialParameters.ClientCredentialParametersBuilder buildConfidentialClientParameters(TokenRequestContext request) {
    ClientCredentialParameters.ClientCredentialParametersBuilder parameters =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    // A caller-supplied assertion takes the place of a secret/certificate credential.
    if (clientAssertionSupplier != null) {
        parameters.clientCredential(ClientCredentialFactory
            .createFromClientAssertion(clientAssertionSupplier.get()));
    }
    // Claims are only honored for CAE-enabled requests.
    if (request.isCaeEnabled() && request.getClaims() != null) {
        parameters.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
    }
    return parameters;
}
/**
 * Asynchronously acquires a token through the managed identity confidential client.
 * Any failure is mapped to {@link CredentialUnavailableException} so chained credentials can
 * fall through.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    return managedIdentityConfidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            HashSet<String> requestedScopes = new HashSet<>(request.getScopes());
            String resolvedTenant = IdentityUtil.resolveTenantId(tenantId, request, options);
            return confidentialClient.acquireToken(
                ClientCredentialParameters.builder(requestedScopes).tenant(resolvedTenant).build());
        }))
        .onErrorMap(t -> new CredentialUnavailableException("Managed Identity authentication is not available.", t))
        .map(MsalToken::new);
}
/**
 * Asynchronously acquires a token through the workload identity confidential client.
 * Any failure is mapped to {@link CredentialUnavailableException} so chained credentials can
 * fall through.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    return workloadIdentityConfidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil
                        .resolveTenantId(tenantId, request, options));
            return confidentialClient.acquireToken(builder.build());
        }
        // Fixed copy-paste error: the message previously said "Managed Identity" even though
        // this is the workload identity path, making failures misleading to diagnose.
        )).onErrorMap(t -> new CredentialUnavailableException("Workload Identity authentication is not available.", t))
        .map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from Active Directory with a username and a password.
 *
 * NOTE(review): the error-message literal below is truncated (the troubleshooting URL was cut
 * off by extraction) and will not compile as-is — restore it from source control.
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
    String username, String password) {
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(() -> {
            UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder
                = buildUsernamePasswordFlowParameters(request, username, password);
            return pc.acquireToken(userNamePasswordParametersBuilder.build());
        }
        // Any MSAL failure is normalized to ClientAuthenticationException with guidance.
        )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and "
            + "password. To mitigate this issue, please refer to the troubleshooting guidelines "
            + "here at https:
            null, t)).map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 *
 * First attempts a silent (cache) acquisition without forcing a refresh; if the cached token
 * is inside the refresh window, the acquisition is retried with a forced refresh.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(() ->
            // First try: silent acquisition from the cache, no forced refresh.
            acquireTokenFromPublicClientSilently(request, pc, account, false)
        ).map(MsalToken::new)
            // Accept the cached token only while it expires after (now + REFRESH_OFFSET)...
            .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
            // ...otherwise retry silently with forceRefresh so MSAL refreshes the token.
            .switchIfEmpty(Mono.fromFuture(() ->
                acquireTokenFromPublicClientSilently(request, pc, account, true)
            ).map(MsalToken::new))
        );
}
/**
 * Builds silent-acquisition parameters (scopes, optional account, resolved tenant, CAE claims)
 * and invokes MSAL's acquireTokenSilently, converting a malformed authority URL into a failed
 * future.
 */
private CompletableFuture<IAuthenticationResult> acquireTokenFromPublicClientSilently(TokenRequestContext request,
    PublicClientApplication pc,
    IAccount account,
    boolean forceRefresh
) {
    SilentParameters.SilentParametersBuilder silentParameters =
        SilentParameters.builder(new HashSet<>(request.getScopes()));
    if (forceRefresh) {
        silentParameters.forceRefresh(true);
    }
    // CAE claims always force a refresh so the new claims are honored by the service.
    if (request.isCaeEnabled() && request.getClaims() != null) {
        silentParameters.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
        silentParameters.forceRefresh(true);
    }
    if (account != null) {
        silentParameters = silentParameters.account(account);
    }
    silentParameters.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        return pc.acquireTokenSilently(silentParameters.build());
    } catch (MalformedURLException e) {
        return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
}
/**
 * Returns the public client accessor matching the request's CAE setting.
 */
private SynchronizedAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    // CAE-enabled requests must use the client built with CAE capabilities.
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
    // Delegate to the account-aware overload with no account hint.
    return authenticateWithConfidentialClientCache(request, null);
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request, IAccount account) {
    return getConfidentialClientInstance(request).getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            SilentParameters.SilentParametersBuilder silentParameters = SilentParameters.builder(
                new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
            if (account != null) {
                silentParameters.account(account);
            }
            try {
                return confidentialClient.acquireTokenSilently(silentParameters.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(MsalToken::new)
            // Only emit cached tokens that are still outside the refresh window.
            .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))));
}
/**
 * Asynchronously acquire a token from Active Directory with a device code challenge. Active
 * Directory provides a device code for login, and the user meets the challenge by
 * authenticating in a browser on the current or a different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return a Publisher that emits an AccessToken when the device challenge is met, or an
 * exception if the device code expires
 */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    return getPublicClientInstance(request).getValue().flatMap(pc ->
        Mono.fromFuture(() -> {
            DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder deviceCodeParameters =
                buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
            return pc.acquireToken(deviceCodeParameters.build());
        }).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code.", null, t))
            .map(MsalToken::new));
}
/**
 * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token.
 *
 * NOTE(review): two message literals below are truncated (troubleshooting URLs cut off by
 * extraction) and will not compile as-is — restore them from source control.
 *
 * @param request the details of the token request
 * @param cloud the cloud name used to look up credentials in the VS Code cache
 * @return a Publisher that emits an AccessToken.
 */
public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) {
    // ADFS authorities are not supported by this flow; fail as unavailable up front.
    if (isADFSTenant()) {
        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
            new CredentialUnavailableException("VsCodeCredential "
                + "authentication unavailable. ADFS tenant/authorities are not supported. "
                + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                + "https:
    }
    VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor();
    String credential = null;
    try {
        // Reads the refresh token stored by the "VS Code Azure" account for the given cloud.
        credential = accessor.getCredentials("VS Code Azure", cloud);
    } catch (CredentialUnavailableException e) {
        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
    }
    RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters
        .builder(new HashSet<>(request.getScopes()), credential);
    // Claims are only honored for CAE-enabled requests.
    if (request.isCaeEnabled() && request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
    }
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build()))
            .onErrorResume(t -> {
                // Interaction-required means the cached token is unusable -> treat as
                // unavailable so a chained credential can take over.
                if (t instanceof MsalInteractionRequiredException) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("Failed to acquire token with"
                            + " VS code credential."
                            + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                            + "https:
                }
                return Mono.error(new ClientAuthenticationException("Failed to acquire token with"
                    + " VS code credential", null, t));
            })
            .map(MsalToken::new)); }
/**
 * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
 *
 * @param request the details of the token request
 * @param authorizationCode the oauth2 authorization code
 * @param redirectUrl the redirectUrl where the authorization code is sent to
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
    URI redirectUrl) {
    AuthorizationCodeParameters.AuthorizationCodeParametersBuilder paramsBuilder =
        AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.getClaims() != null) {
        paramsBuilder.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
    }
    // A configured client secret means we can redeem the code confidentially;
    // otherwise fall back to the public client application.
    Mono<IAuthenticationResult> resultMono = (clientSecret != null)
        ? getConfidentialClientInstance(request).getValue()
            .flatMap(cc -> Mono.fromFuture(() -> cc.acquireToken(paramsBuilder.build())))
        : getPublicClientInstance(request).getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(paramsBuilder.build())));
    return resultMono
        .onErrorMap(error -> new ClientAuthenticationException(
            "Failed to acquire token with authorization code", null, error))
        .map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from Active Directory by opening a browser and waiting for the user to log in.
 * The credential runs a minimal local HTTP server on the given port, so the corresponding localhost address
 * must be listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
    String redirectUrl, String loginHint) {
    // Resolve the redirect URI: explicit port wins, then an explicit redirect URL, then bare localhost.
    String redirect;
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    URI redirectUri;
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
    return getPublicClientInstance(request).getValue().flatMap(pc -> {
        // When the OS broker is enabled with the default broker account, attempt a silent
        // acquisition first and quietly fall through to interactive auth on failure.
        if (options.isBrokerEnabled() && options.useDefaultBrokerAccount()) {
            return Mono.fromFuture(() ->
                acquireTokenFromPublicClientSilently(request, pc, null, false))
                .onErrorResume(e -> Mono.empty());
        }
        return Mono.empty();
    })
    .switchIfEmpty(Mono.defer(() -> {
        InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
            buildInteractiveRequestParameters(request, loginHint, redirectUri);
        return getPublicClientInstance(request).getValue()
            .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build())));
    }))
    // FIX: the mapper now RETURNS the wrapping exception. The original threw from inside the
    // onErrorMap function, which is a Reactor anti-pattern and relied on the operator catching
    // the mapper's own exception.
    .onErrorMap(t -> !(t instanceof ClientAuthenticationException),
        t -> new ClientAuthenticationException(
            "Failed to acquire token with Interactive Browser Authentication.", null, t))
    .map(MsalToken::new);
}
/**
 * Gets a token from the shared token cache, optionally filtered to a specific username.
 *
 * @param request the details of the token request
 * @param username the username to match against cached accounts, or {@code null} to accept any single account
 * @return a Publisher that emits an MsalToken from the cache, or errors when zero or multiple accounts match
 */
public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
    SynchronizedAccessor<PublicClientApplication> publicClient = getPublicClientInstance(request);
    return publicClient.getValue()
        .flatMap(pc -> Mono.fromFuture(pc::getAccounts))
        .onErrorMap(t -> new CredentialUnavailableException(
            "Cannot get accounts from token cache. Error: " + t.getMessage(), t))
        .flatMap(set -> {
            IAccount requestedAccount;
            // Keyed by home account id so the same account cached multiple times counts once.
            Map<String, IAccount> accounts = new HashMap<>();
            if (set.isEmpty()) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("SharedTokenCacheCredential "
                        + "authentication unavailable. No accounts were found in the cache.")));
            }
            // Collect the accounts matching the requested username (or all, when none given).
            for (IAccount cached : set) {
                if (username == null || username.equals(cached.username())) {
                    accounts.putIfAbsent(cached.homeAccountId(), cached);
                }
            }
            if (accounts.isEmpty()) {
                // NOTE(review): the paths below throw raw RuntimeException while the empty-cache path
                // above uses CredentialUnavailableException — confirm whether this asymmetry is intended.
                return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                    + "authentication unavailable. No account matching the specified username: %s was "
                    + "found in the cache.", username)));
            } else if (accounts.size() > 1) {
                if (username == null) {
                    return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication unavailable. "
                        + "Multiple accounts were found in the cache. Use username and tenant id to disambiguate.")
                    );
                } else {
                    return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                        + "authentication unavailable. Multiple accounts matching the specified username: "
                        + "%s were found in the cache.", username)));
                }
            } else {
                // Exactly one matching account — use it for the silent acquisition below.
                requestedAccount = accounts.values().iterator().next();
            }
            return authenticateWithPublicClientCache(request, requestedAccount);
        });
}
/**
 * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint.
 * <p>
 * The Arc protocol is challenge-based: an unauthenticated GET is expected to fail with 401 and a
 * {@code WWW-Authenticate: Basic realm=<file>} header naming a local file containing the secret; a
 * second GET authenticated with that secret returns the token.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
private Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint,
    TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String payload = identityEndpoint + "?resource="
            + urlEncode(ScopeUtil.scopesToResource(request.getScopes()))
            + "&api-version=" + ARC_MANAGED_IDENTITY_ENDPOINT_API_VERSION;
        URL url = getUrl(payload);

        // ---- First request: trigger the 401 challenge carrying the secret-file realm. ----
        HttpURLConnection challengeConnection = null;
        try {
            challengeConnection = (HttpURLConnection) url.openConnection();
            challengeConnection.setRequestMethod("GET");
            challengeConnection.setRequestProperty("Metadata", "true");
            challengeConnection.setRequestProperty("User-Agent", userAgent);
            challengeConnection.connect();
        } catch (IOException e) {
            if (challengeConnection == null) {
                // FIX: the original performed challenge parsing in a finally block, which
                // dereferenced a null connection here and masked this exception with an NPE.
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize "
                    + "Http URL connection to the endpoint.",
                    null, e));
            }
            int status = challengeConnection.getResponseCode();
            if (status != 401) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401"
                    + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status),
                    null, e));
            }
            // A 401 is the expected challenge — fall through and parse the realm below.
        }

        // ---- Parse the challenge and read the secret from the local file it names. ----
        // (Moved out of the original finally block so failures here no longer mask the
        // exceptions thrown above.)
        String secretKey;
        try {
            String realm = challengeConnection.getHeaderField("WWW-Authenticate");
            if (realm == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            int separatorIndex = realm.indexOf("=");
            if (separatorIndex == -1) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            String secretKeyPath = realm.substring(separatorIndex + 1);
            secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8);
            // The original checked secretKey == null here, which was dead code (readAllBytes /
            // new String never return null); an emptiness check keeps the defensive intent.
            if (secretKey.isEmpty()) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value"
                    + " in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
        } finally {
            challengeConnection.disconnect();
        }

        // ---- Second request: authenticate with the secret read from the challenge realm. ----
        HttpURLConnection connection = null;
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Authorization", "Basic " + secretKey);
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
 * Asynchronously acquire a token by exchanging the cached client assertion (federated token)
 * with Active Directory.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
    // The assertion accessor caches the federated token file contents and refreshes on timeout.
    return clientAssertionAccessor.getValue()
        .flatMap(assertion ->
            Mono.fromCallable(() -> authenticateWithExchangeTokenHelper(request, assertion)));
}
/**
 * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param thumbprint the SSL certificate thumbprint trusted for the local endpoint
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
private Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
                                                                             String identityHeader,
                                                                             String thumbprint,
                                                                             TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        // Build the request URL: endpoint + resource + api-version (+ optional identity selectors).
        StringBuilder requestUrl = new StringBuilder(1024)
            .append(identityEndpoint)
            .append("?resource=")
            .append(urlEncode(resource))
            .append("&api-version=")
            .append(SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION);
        if (clientId != null) {
            // Service Fabric always uses the system-assigned identity; warn but keep the parameter.
            LOGGER.warning("User assigned managed identities are not supported in the Service Fabric environment.");
            requestUrl.append("&client_id=").append(urlEncode(clientId));
        }
        if (resourceId != null) {
            LOGGER.warning("User assigned managed identities are not supported in the Service Fabric environment.");
            requestUrl.append("&mi_res_id=").append(urlEncode(resourceId));
        }
        HttpsURLConnection connection = null;
        try {
            URL url = getUrl(requestUrl.toString());
            connection = (HttpsURLConnection) url.openConnection();
            // The local endpoint presents a self-signed certificate; trust it by thumbprint.
            IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
            connection.setRequestMethod("GET");
            if (identityHeader != null) {
                connection.setRequestProperty("Secret", identityHeader);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.setRequestProperty("User-Agent", userAgent);
            connection.connect();
            return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 * <p>
 * Specifying identity parameters will use the 2019-08-01 endpoint version.
 * Specifying MSI parameters will use the 2017-09-01 endpoint version.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param msiEndpoint the MSI endpoint to acquire token from
 * @param msiSecret the MSI secret to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
                                                               String msiEndpoint, String msiSecret,
                                                               TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        // Prefer the newer 2019-08-01 identity endpoint; fall back to the legacy MSI endpoint.
        boolean useIdentityEndpoint = identityEndpoint != null;
        String endpoint = useIdentityEndpoint ? identityEndpoint : msiEndpoint;
        String headerValue = useIdentityEndpoint ? identityHeader : msiSecret;
        String endpointVersion = useIdentityEndpoint ? IDENTITY_ENDPOINT_VERSION : MSI_ENDPOINT_VERSION;

        String resource = ScopeUtil.scopesToResource(request.getScopes());
        StringBuilder requestUrl = new StringBuilder(1024)
            .append(endpoint)
            .append("?resource=")
            .append(urlEncode(resource))
            .append("&api-version=")
            .append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            if (endpointVersion.equals(IDENTITY_ENDPOINT_VERSION)) {
                requestUrl.append("&client_id=");
            } else {
                // Legacy endpoint without a secret header means Cloud Shell, where
                // user-assigned identities are unsupported.
                if (headerValue == null) {
                    LOGGER.warning("User assigned managed identities are not supported in the Cloud Shell environment.");
                }
                requestUrl.append("&clientid=");
            }
            requestUrl.append(urlEncode(clientId));
        }
        if (resourceId != null) {
            if (endpointVersion.equals(MSI_ENDPOINT_VERSION) && headerValue == null) {
                LOGGER.warning("User assigned managed identities are not supported in the Cloud Shell environment.");
            }
            requestUrl.append("&mi_res_id=").append(urlEncode(resourceId));
        }
        HttpURLConnection connection = null;
        try {
            URL url = getUrl(requestUrl.toString());
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                // Header name differs between the two endpoint generations.
                if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
                    connection.setRequestProperty("X-IDENTITY-HEADER", headerValue);
                } else {
                    connection.setRequestProperty("Secret", headerValue);
                }
            }
            connection.setRequestProperty("Metadata", "true");
            connection.setRequestProperty("User-Agent", userAgent);
            connection.connect();
            return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
 * <p>
 * Transient failures (404, 410, 429, 5xx) are retried up to {@code options.getMaxRetry()} times
 * with the configured backoff; a 410 waits at least 70 seconds per the IMDS upgrade contract.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        payload.append("api-version=2018-02-01");
        payload.append("&resource=");
        payload.append(urlEncode(resource));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(urlEncode(clientId));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(urlEncode(resourceId));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
        + IdentityConstants.DEFAULT_IMDS_TOKENPATH;
    return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                url = getUrl(endpoint + "?" + payload);
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.setRequestProperty("User-Agent", userAgent);
                connection.connect();
                return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                    SerializerEncoding.JSON);
            } catch (IOException exception) {
                if (connection == null) {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Could not connect to the url: " + url + ".", exception));
                }
                int responseCode;
                try {
                    responseCode = connection.getResponseCode();
                } catch (Exception e) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established, "
                                + e.getMessage() + ".", e));
                }
                // 400 = identity not configured on this VM — not retryable.
                if (responseCode == 400) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established.", null));
                }
                if (responseCode == 403) {
                    // FIX: getResponseMessage() may return null (no HTTP reason phrase); the
                    // original dereferenced it unconditionally, which could mask the real
                    // failure with a NullPointerException.
                    String responseMessage = connection.getResponseMessage();
                    if (responseMessage != null
                        && responseMessage.contains("A socket operation was attempted to an unreachable network")) {
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "Managed Identity response was not in the expected format."
                                    + " See the inner exception for details.",
                                new Exception(responseMessage)));
                    }
                }
                // Retryable statuses per the IMDS guidance.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    int retryTimeoutInMs = getRetryTimeoutInMs(retry);
                    // A 410 means IMDS is upgrading; wait at least 70 seconds.
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
/**
 * Computes the backoff for the given retry attempt, in milliseconds, using the
 * configured retry-timeout function.
 *
 * @param retry the 1-based retry attempt number
 * @return the backoff duration in milliseconds
 */
int getRetryTimeoutInMs(int retry) {
    Duration backoff = options.getRetryTimeout().apply(Duration.ofSeconds(retry));
    return (int) backoff.toMillis();
}
/**
 * Blocks the current thread for the given number of milliseconds.
 *
 * @param millis the time to sleep in milliseconds
 * @throws IllegalStateException if the sleep is interrupted
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // FIX: restore the interrupt flag before rethrowing so callers up the stack can
        // still observe the interruption (the original swallowed the thread's status).
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/**
 * Converts azure-core {@code ProxyOptions} into an equivalent {@link java.net.Proxy}.
 * SOCKS4/SOCKS5 map to a SOCKS proxy; anything else (including HTTP) maps to an HTTP proxy.
 *
 * @param proxyOptions the azure-core proxy configuration
 * @return the equivalent java.net proxy
 */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions proxyOptions) {
    switch (proxyOptions.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, proxyOptions.getAddress());
        default:
            // HTTP and any future types fall back to an HTTP proxy, as before.
            return new Proxy(Type.HTTP, proxyOptions.getAddress());
    }
}
/**
 * Opens the given URL in the OS default browser (Windows, macOS, or Linux).
 * Logs an error on platforms where no launcher command is known.
 *
 * @param url the URL to open
 * @throws IOException if launching the browser process fails
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // FIX: use the String[] overload of exec. The single-String overload tokenizes the whole
    // command on whitespace, breaking URLs that contain spaces or quoting, and is deprecated
    // since Java 18. The array form passes the URL as a single argument.
    if (os.contains("win")) {
        rt.exec(new String[] {"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[] {"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] {"xdg-open", url});
    } else {
        LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
/**
 * Builds a CompletableFuture that is already completed exceptionally with the given exception,
 * for returning failures through MSAL's future-based API.
 *
 * @param e the exception to fail the future with
 * @return a future completed exceptionally with {@code e}
 */
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) {
    CompletableFuture<IAuthenticationResult> failed = new CompletableFuture<>();
    failed.completeExceptionally(e);
    return failed;
}
/**
 * Get the configured identity client options.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    // Simple accessor over the options supplied at construction time.
    return options;
}
/**
 * Checks whether the configured tenant is the special ADFS tenant ("adfs"),
 * which several flows in this client do not support.
 *
 * @return true if the configured tenant id is the ADFS tenant
 */
private boolean isADFSTenant() {
    // Null-safe: a null tenantId can never equal the ADFS constant.
    return this.tenantId != null && this.tenantId.equals(ADFS_TENANT);
}
/**
 * Builds the MSAL app-token-provider callback for workload identity: each invocation
 * exchanges the federated client assertion for an access token and adapts the result
 * into MSAL's {@code TokenProviderResult}.
 *
 * @return a function mapping MSAL's provider parameters to a future token result
 */
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return providerParameters -> {
        // Translate MSAL's parameters into an azure-core token request.
        TokenRequestContext tokenRequest = new TokenRequestContext()
            .setScopes(new ArrayList<>(providerParameters.scopes))
            .setClaims(providerParameters.claims)
            .setTenantId(providerParameters.tenantId);
        return authenticateWithExchangeToken(tokenRequest)
            .map(accessToken -> {
                TokenProviderResult result = new TokenProviderResult();
                result.setAccessToken(accessToken.getToken());
                result.setTenantId(tokenRequest.getTenantId());
                result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
                return result;
            })
            .toFuture();
    };
}
} | class IdentityClient extends IdentityClientBase {
// Lazily-initialized, thread-safe holders for the underlying MSAL application instances.
// The "WithCae" variants are configured for Continuous Access Evaluation.
private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
// Confidential clients dedicated to the managed identity and workload identity flows.
private final SynchronizedAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronizedAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
// Cached client assertion text, refreshed on the timeout configured in the constructor.
private final SynchronizedAccessor<String> clientAssertionAccessor;
/**
 * Creates an IdentityClient with the given options.
 *
 * @param tenantId the tenant ID of the application.
 * @param clientId the client ID of the application.
 * @param clientSecret the client secret of the application.
 * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
 * @param clientAssertionFilePath the path to the file containing the client assertion token.
 * @param resourceId the resource ID of the application
 * @param clientAssertionSupplier the supplier providing the client assertion token.
 * @param certificate the PKCS12 or PEM certificate of the application.
 * @param certificatePassword the password protecting the PFX certificate.
 * @param isSharedTokenCacheCredential Indicate whether the credential is
 * {@link com.azure.identity.SharedTokenCacheCredential} or not.
 * @param clientAssertionTimeout the timeout to use for the client assertion.
 * @param options the options configuring the client.
 */
IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath,
    String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
    byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
    Duration clientAssertionTimeout, IdentityClientOptions options) {
    super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId,
        clientAssertionSupplier, certificate, certificatePassword, isSharedTokenCacheCredential,
        clientAssertionTimeout, options);
    // Separate accessors cache the CAE-enabled and non-CAE MSAL application variants.
    this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() ->
        getPublicClientApplication(isSharedTokenCacheCredential, false));
    this.publicClientApplicationAccessorWithCae = new SynchronizedAccessor<>(() ->
        getPublicClientApplication(isSharedTokenCacheCredential, true));
    this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication(false));
    this.confidentialClientApplicationAccessorWithCae = new SynchronizedAccessor<>(() -> getConfidentialClientApplication(true));
    this.managedIdentityConfidentialClientApplicationAccessor =
        new SynchronizedAccessor<>(this::getManagedIdentityConfidentialClientApplication);
    this.workloadIdentityConfidentialClientApplicationAccessor =
        new SynchronizedAccessor<>(this::getWorkloadIdentityConfidentialClientApplication);
    // Client assertions are cached for 5 minutes unless a custom timeout is supplied.
    Duration cacheTimeout = (clientAssertionTimeout == null) ? Duration.ofMinutes(5) : clientAssertionTimeout;
    this.clientAssertionAccessor = new SynchronizedAccessor<>(this::parseClientAssertion, cacheTimeout);
}
/**
 * Lazily builds the confidential client application inside the reactive chain.
 * Construction errors surface through the Mono's error signal instead of at assembly time,
 * matching the original defer/try/catch shape.
 *
 * @param enableCae whether the client should be configured for Continuous Access Evaluation
 * @return a Mono emitting the constructed confidential client
 */
private Mono<ConfidentialClientApplication> getConfidentialClientApplication(boolean enableCae) {
    return Mono.fromCallable(() -> getConfidentialClient(enableCae));
}
/**
 * Lazily builds the managed-identity confidential client inside the reactive chain.
 * Construction errors surface through the Mono's error signal instead of at assembly time.
 *
 * @return a Mono emitting the constructed confidential client
 */
private Mono<ConfidentialClientApplication> getManagedIdentityConfidentialClientApplication() {
    return Mono.fromCallable(super::getManagedIdentityConfidentialClient);
}
/**
 * Lazily builds the workload-identity confidential client inside the reactive chain.
 * Construction errors surface through the Mono's error signal instead of at assembly time.
 *
 * @return a Mono emitting the constructed confidential client
 */
private Mono<ConfidentialClientApplication> getWorkloadIdentityConfidentialClientApplication() {
    return Mono.fromCallable(super::getWorkloadIdentityConfidentialClient);
}
/**
 * Dispatches the token request to the endpoint-specific flow for the configured
 * managed-identity host environment.
 *
 * @param tokenRequestContext the details of the token request
 * @return a Publisher that emits an AccessToken, or an error for an unknown environment
 */
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    ManagedIdentityParameters parameters = options.getManagedIdentityParameters();
    ManagedIdentityType type = options.getManagedIdentityType();
    if (type == ManagedIdentityType.APP_SERVICE) {
        return authenticateToManagedIdentityEndpoint(parameters.getIdentityEndpoint(),
            parameters.getIdentityHeader(), parameters.getMsiEndpoint(), parameters.getMsiSecret(),
            tokenRequestContext);
    }
    if (type == ManagedIdentityType.SERVICE_FABRIC) {
        return authenticateToServiceFabricManagedIdentityEndpoint(parameters.getIdentityEndpoint(),
            parameters.getIdentityHeader(), parameters.getIdentityServerThumbprint(), tokenRequestContext);
    }
    if (type == ManagedIdentityType.ARC) {
        return authenticateToArcManagedIdentityEndpoint(parameters.getIdentityEndpoint(), tokenRequestContext);
    }
    if (type == ManagedIdentityType.AKS) {
        return authenticateWithExchangeToken(tokenRequestContext);
    }
    if (type == ManagedIdentityType.VM) {
        return authenticateToIMDSEndpoint(tokenRequestContext);
    }
    return Mono.error(LOGGER.logExceptionAsError(
        new CredentialUnavailableException("Unknown Managed Identity type, authentication not available.")));
}
/**
 * Reads the client assertion token from the configured file path.
 *
 * @return a Mono emitting the assertion file contents as UTF-8 text, or erroring with
 * IllegalStateException when no file path was configured
 */
private Mono<String> parseClientAssertion() {
    return Mono.fromCallable(() -> {
        // Guard clause: the assertion file path is mandatory for this flow.
        if (clientAssertionFilePath == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "Client Assertion File Path is not provided."
                    + " It should be provided to authenticate with client assertion."
            ));
        }
        byte[] raw = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(raw, StandardCharsets.UTF_8);
    });
}
/**
 * Lazily builds the public client application inside the reactive chain.
 * Construction errors surface through the Mono's error signal instead of at assembly time.
 *
 * @param sharedTokenCacheCredential whether this client backs a SharedTokenCacheCredential
 * @param enableCae whether the client should be configured for Continuous Access Evaluation
 * @return a Mono emitting the constructed public client
 */
private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential, boolean enableCae) {
    return Mono.fromCallable(() -> getPublicClient(sharedTokenCacheCredential, enableCae));
}
/**
 * Asynchronously acquire a token using credentials cached by the Azure Tools for IntelliJ plugin.
 * <p>
 * Tries, in order: a refresh token previously cached by this library, then the plugin's own
 * cache — which may hold Service Principal ("SP") or Device Code ("DC") credentials.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an MsalToken, or an error when no usable cached credentials exist
 */
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
    try {
        IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
        // Fast path: a refresh token this library previously stored in the MSAL cache.
        String cachedRefreshToken = cacheAccessor.getIntelliJCredentialsFromIdentityMsalCache();
        if (!CoreUtils.isNullOrEmpty(cachedRefreshToken)) {
            RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), cachedRefreshToken);
            if (request.getClaims() != null) {
                ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                refreshTokenParametersBuilder.claims(customClaimRequest);
            }
            return publicClientApplicationAccessor.getValue()
                .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                    .map(MsalToken::new));
        }
        // Otherwise consult the IntelliJ plugin's own cached auth method details.
        IntelliJAuthMethodDetails authDetails;
        try {
            authDetails = cacheAccessor.getAuthDetailsIfAvailable();
        } catch (CredentialUnavailableException e) {
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
        }
        if (authDetails == null) {
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException("IntelliJ Authentication not available."
                    + " Please log in with Azure Tools for IntelliJ plugin in the IDE."
                    // NOTE(review): "Fore" is a typo ("For") in this runtime message, and the
                    // literal below is truncated (unterminated string) — the troubleshooting URL
                    // appears to have been lost during extraction.
                    + " Fore more details refer to the troubleshooting guidelines here at"
                    + " https:
        }
        String authType = authDetails.getAuthMethod();
        if ("SP".equalsIgnoreCase(authType)) {
            // Service Principal: rebuild a confidential client from the plugin's stored details.
            Map<String, String> spDetails = cacheAccessor
                .getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
            String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
            try {
                ConfidentialClientApplication.Builder applicationBuilder =
                    ConfidentialClientApplication.builder(spDetails.get("client"),
                        ClientCredentialFactory.createFromSecret(spDetails.get("key")))
                        .authority(authorityUrl)
                        .instanceDiscovery(options.isInstanceDiscoveryEnabled());
                // Route HTTP through the SDK pipeline when available, else through a plain proxy.
                if (httpPipelineAdapter != null) {
                    applicationBuilder.httpClient(httpPipelineAdapter);
                } else if (options.getProxyOptions() != null) {
                    applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                }
                if (options.getExecutorService() != null) {
                    applicationBuilder.executorService(options.getExecutorService());
                }
                ConfidentialClientApplication application = applicationBuilder.build();
                return Mono.fromFuture(application.acquireToken(
                    ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                        .build())).map(MsalToken::new);
            } catch (MalformedURLException e) {
                return Mono.error(e);
            }
        } else if ("DC".equalsIgnoreCase(authType)) {
            // Device Code: redeem the refresh token stored by the plugin's device-code login.
            LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
                + " for IntelliJ Plugin.");
            if (isADFSTenant()) {
                LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
                    + " the ADFS tenants are not supported via IntelliJ Authentication currently.");
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("IntelliJCredential "
                        + "authentication unavailable. ADFS tenant/authorities are not supported.")));
            }
            try {
                JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
                String refreshToken = intelliJCredentials.get("refreshToken").textValue();
                RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
                    RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);
                if (request.getClaims() != null) {
                    ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
                    refreshTokenParametersBuilder.claims(customClaimRequest);
                }
                return publicClientApplicationAccessor.getValue()
                    .flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
                        .map(MsalToken::new));
            } catch (CredentialUnavailableException e) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
            }
        } else {
            // Unknown auth method stored by the plugin — only SP and DC are supported.
            LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
                + " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
                + " of those schemes from Azure Tools for IntelliJ plugin.");
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                new CredentialUnavailableException("IntelliJ Authentication not available."
                    + " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
        }
    } catch (IOException e) {
        return Mono.error(e);
    }
}
/**
 * Asynchronously acquire a token from Active Directory with Azure CLI.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
    // Validate the resource derived from the requested scopes before shelling out.
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    try {
        ScopeUtil.validateScope(resource);
    } catch (IllegalArgumentException ex) {
        return Mono.error(LOGGER.logExceptionAsError(ex));
    }
    StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ")
        .append(resource);
    try {
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azCommand.append(" --tenant ").append(tenant);
        }
    } catch (ClientAuthenticationException | IllegalArgumentException e) {
        return Mono.error(e);
    }
    try {
        return Mono.just(getTokenFromAzureCLIAuthentication(azCommand));
    } catch (RuntimeException e) {
        // CredentialUnavailableException goes through the chained-credential-aware logger.
        return Mono.error(e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Asynchronously acquire a token from Active Directory with the Azure Developer CLI ({@code azd}).
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    // FIX: isEmpty() instead of size() == 0 — the idiomatic, intention-revealing form.
    if (scopes.isEmpty()) {
        return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request")));
    }
    // Validate every scope before handing it to the shell command.
    for (String scope : scopes) {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            return Mono.error(LOGGER.logExceptionAsError(ex));
        }
    }
    azdCommand.append(String.join(" --scope ", scopes));
    try {
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azdCommand.append(" --tenant-id ").append(tenant);
        }
    } catch (ClientAuthenticationException | IllegalArgumentException e) {
        return Mono.error(e);
    }
    try {
        AccessToken token = getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
        return Mono.just(token);
    } catch (RuntimeException e) {
        // CredentialUnavailableException goes through the chained-credential-aware logger.
        return Mono.error(e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Asynchronously acquire a token from Active Directory with Azure PowerShell.
 * <p>
 * Tries pwsh (PowerShell Core) first, then — on Windows only — the legacy powershell.exe.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) {
    ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER);
    List<CredentialUnavailableException> exceptions = new ArrayList<>(2);
    PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows()
        ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE);
    // Legacy Windows PowerShell is only available as a fallback on Windows.
    PowershellManager legacyPowerShellManager = Platform.isWindows()
        ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null;
    List<PowershellManager> powershellManagers = new ArrayList<>(2);
    powershellManagers.add(defaultPowerShellManager);
    if (legacyPowerShellManager != null) {
        powershellManagers.add(legacyPowerShellManager);
    }
    // concurrency of 1 keeps the managers strictly sequential; the first success wins.
    return Flux.fromIterable(powershellManagers)
        .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager)
            .onErrorResume(t -> {
                // Use a real type check instead of the previous fragile class-name string
                // comparison — this also matches subclasses of CredentialUnavailableException,
                // which the unconditional cast below already assumed.
                if (!(t instanceof CredentialUnavailableException)) {
                    return Mono.error(new ClientAuthenticationException(
                        "Azure Powershell authentication failed. Error Details: " + t.getMessage()
                        + ". To mitigate this issue, please refer to the troubleshooting guidelines here at "
                        + "https:
                        null, t));
                }
                // Collect "unavailable" failures; a later manager may still succeed.
                exceptions.add((CredentialUnavailableException) t);
                return Mono.empty();
            }), 1)
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // Every manager failed: fold the collected failures into one aggregate exception.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                // Fixed missing space: the two literals previously concatenated to
                // "defaultpowershell(pwsh)".
                last = new CredentialUnavailableException("Azure PowerShell authentication failed using default "
                    + "powershell(pwsh) with following error: " + current.getMessage()
                    + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)"
                    + " with following error: " + last.getMessage(),
                    last.getCause());
            }
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last)));
        }));
}
/**
 * Asynchronously acquire a token from Active Directory using the on-behalf-of (OBO) flow.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) {
    return getConfidentialClientInstance(request)
        .getValue()
        .flatMap(client ->
            Mono.fromFuture(() -> client.acquireToken(buildOBOFlowParameters(request)))
                .map(MsalToken::new));
}
/**
 * Runs Azure PowerShell (via the supplied manager) to obtain an access token for the
 * requested scopes. The session is always closed via {@code Mono.using}.
 *
 * @param request the details of the token request
 * @param powershellManager the PowerShell session to execute commands in
 * @return a Mono emitting the parsed AccessToken, or a CredentialUnavailableException when
 *     the Az.Accounts module is missing or the user is not logged in
 */
private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request,
                                                       PowershellManager powershellManager) {
    // PowerShell's Get-AzAccessToken takes a single resource URI, not a scope list.
    String scope = ScopeUtil.scopesToResource(request.getScopes());
    try {
        ScopeUtil.validateScope(scope);
    } catch (IllegalArgumentException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
    // Mono.using guarantees PowershellManager::close runs regardless of outcome.
    return Mono.using(() -> powershellManager, manager -> manager.initSession().flatMap(m -> {
        // First make sure a sufficiently recent Az.Accounts module is importable.
        String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru";
        return m.runCommand(azAccountsCommand).flatMap(output -> {
            if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded "
                + "because no valid module file")) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("Az.Account module with version >= 2.2.0 is not installed. "
                        + "It needs to be installed to use Azure PowerShell "
                        + "Credential.")));
            }
            LOGGER.verbose("Az.accounts module was found installed.");
            String command = "Get-AzAccessToken -ResourceUrl '"
                + scope
                + "' | ConvertTo-Json";
            LOGGER.verbose("Azure Powershell Authentication => Executing the command `{}` in Azure "
                + "Powershell to retrieve the Access Token.", command);
            return m.runCommand(command).flatMap(out -> {
                // A login prompt in the output means there is no cached Azure session.
                if (out.contains("Run Connect-AzAccount to login")) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "Run Connect-AzAccount to login to Azure account in PowerShell.")));
                }
                try {
                    LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the "
                        + "received response from Azure Powershell.");
                    // The JSON payload carries "Token" and "ExpiresOn" fields; ExpiresOn is
                    // normalized to UTC before building the AccessToken.
                    Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class,
                        SerializerEncoding.JSON);
                    String accessToken = objectMap.get("Token");
                    String time = objectMap.get("ExpiresOn");
                    OffsetDateTime expiresOn = OffsetDateTime.parse(time).withOffsetSameInstant(ZoneOffset.UTC);
                    return Mono.just(new AccessToken(accessToken, expiresOn));
                } catch (IOException e) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "Encountered error when deserializing response from Azure Power Shell.", e)));
                }
            });
        });
    }), PowershellManager::close);
}
/**
 * Asynchronously acquire a token from Active Directory with a client secret.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
    return getConfidentialClientInstance(request)
        .getValue()
        .flatMap(client ->
            Mono.fromFuture(() -> client.acquireToken(buildConfidentialClientParameters(request).build())))
        .map(MsalToken::new);
}
// Returns the CAE-enabled confidential client accessor when the request opted into
// Continuous Access Evaluation, otherwise the plain one.
private SynchronizedAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext requestContext) {
    if (requestContext.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
// Builds the MSAL client-credential parameters for the request: resolved tenant, optional
// client assertion, and CAE claims when the request enables them.
private ClientCredentialParameters.ClientCredentialParametersBuilder buildConfidentialClientParameters(TokenRequestContext request) {
    ClientCredentialParameters.ClientCredentialParametersBuilder parameters =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (clientAssertionSupplier != null) {
        // A configured assertion supplier takes the place of a static credential.
        parameters.clientCredential(
            ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()));
    }
    if (request.isCaeEnabled() && request.getClaims() != null) {
        parameters.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
    }
    return parameters;
}
// Acquires a token via the managed-identity confidential client; any failure is surfaced as
// CredentialUnavailableException so chained credentials can continue.
public Mono<AccessToken> authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    return managedIdentityConfidentialClientApplicationAccessor.getValue()
        .flatMap(client -> Mono.fromFuture(() -> {
            ClientCredentialParameters parameters =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
                    .build();
            return client.acquireToken(parameters);
        }))
        .onErrorMap(t -> new CredentialUnavailableException("Managed Identity authentication is not available.", t))
        .map(MsalToken::new);
}
// Acquires a token via the workload-identity confidential client for the resolved tenant.
public Mono<AccessToken> authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    return workloadIdentityConfidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil
                        .resolveTenantId(tenantId, request, options));
            return confidentialClient.acquireToken(builder.build());
        }
        // NOTE(review): the message says "Managed Identity" on the workload-identity path —
        // possibly intentional because managed-identity credentials can delegate here;
        // confirm before changing the wording.
        )).onErrorMap(t -> new CredentialUnavailableException("Managed Identity authentication is not available.", t))
        .map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from Active Directory with a username and a password
 * (resource-owner password credential flow).
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
                                                        String username, String password) {
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(() -> {
            UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder
                = buildUsernamePasswordFlowParameters(request, username, password);
            return pc.acquireToken(userNamePasswordParametersBuilder.build());
        }
        // Any failure is wrapped as ClientAuthenticationException with a pointer to the
        // troubleshooting guide; the original cause is preserved.
        )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and "
        + "password. To mitigate this issue, please refer to the troubleshooting guidelines "
        + "here at https:
        null, t)).map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 * <p>
 * First attempts a silent (cache) acquisition; if the cached token is within the refresh
 * window it is refreshed with a forced silent request.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(() ->
            acquireTokenFromPublicClientSilently(request, pc, account, false)
        ).map(MsalToken::new)
            // Only accept the cached token if it is not inside the proactive refresh window;
            // otherwise fall through to a force-refresh silent acquisition.
            .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
            .switchIfEmpty(Mono.fromFuture(() ->
                acquireTokenFromPublicClientSilently(request, pc, account, true)
            ).map(MsalToken::new))
        );
}
// Performs a silent (cache-backed) MSAL acquisition. forceRefresh bypasses the cache; a CAE
// claims challenge also forces a refresh. Failures building the request surface as a failed
// future rather than a thrown exception.
private CompletableFuture<IAuthenticationResult> acquireTokenFromPublicClientSilently(TokenRequestContext request,
                                                               PublicClientApplication pc,
                                                               IAccount account,
                                                               boolean forceRefresh
) {
    SilentParameters.SilentParametersBuilder silentParameters =
        SilentParameters.builder(new HashSet<>(request.getScopes()));
    if (forceRefresh) {
        silentParameters.forceRefresh(true);
    }
    if (request.isCaeEnabled() && request.getClaims() != null) {
        // A claims challenge means the cached token was rejected; refresh unconditionally.
        silentParameters.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
        silentParameters.forceRefresh(true);
    }
    if (account != null) {
        silentParameters = silentParameters.account(account);
    }
    silentParameters.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        return pc.acquireTokenSilently(silentParameters.build());
    } catch (MalformedURLException e) {
        return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
}
// Returns the CAE-enabled public client accessor when the request opted into Continuous
// Access Evaluation, otherwise the plain one.
private SynchronizedAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 * <p>
 * Convenience overload that delegates with a null account.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
    return authenticateWithConfidentialClientCache(request, null);
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 * <p>
 * Attempts a silent (cache) acquisition; the result is dropped (empty Mono) when the cached
 * token is already inside the proactive refresh window.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 *
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request, IAccount account) {
    return getConfidentialClientInstance(request).getValue()
        .flatMap(client -> Mono.fromFuture(() -> {
            SilentParameters.SilentParametersBuilder silentParameters =
                SilentParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
            if (account != null) {
                silentParameters.account(account);
            }
            try {
                return client.acquireTokenSilently(silentParameters.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(MsalToken::new)
            .filter(token -> OffsetDateTime.now().isBefore(token.getExpiresAt().minus(REFRESH_OFFSET))));
}
/**
 * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
 * code expires
 */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
                                                  Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(() ->
                pc.acquireToken(buildDeviceCodeFlowParameters(request, deviceCodeConsumer).build()))
            .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code.", null, t))
            .map(MsalToken::new));
}
/**
 * Asynchronously acquire a token from Active Directory with Visual Studio cached refresh token.
 *
 * @param request the details of the token request
 * @param cloud the cloud name used to look up the cached VS Code credentials
 * @return a Publisher that emits an AccessToken.
 */
public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) {
    // Refresh-token redemption is not supported against ADFS authorities.
    if (isADFSTenant()) {
        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
            new CredentialUnavailableException("VsCodeCredential "
                + "authentication unavailable. ADFS tenant/authorities are not supported. "
                + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                + "https:
    }
    VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor();
    // Read the refresh token VS Code stored in the platform credential store.
    String credential = null;
    try {
        credential = accessor.getCredentials("VS Code Azure", cloud);
    } catch (CredentialUnavailableException e) {
        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
    }
    RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters
        .builder(new HashSet<>(request.getScopes()), credential);
    if (request.isCaeEnabled() && request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
    }
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build()))
            .onErrorResume(t -> {
                // MsalInteractionRequiredException means the cached refresh token can no longer
                // be redeemed silently - report as "unavailable" so chains can fall through.
                if (t instanceof MsalInteractionRequiredException) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("Failed to acquire token with"
                            + " VS code credential."
                            + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                            + "https:
                }
                return Mono.error(new ClientAuthenticationException("Failed to acquire token with"
                    + " VS code credential", null, t));
            })
            .map(MsalToken::new)); }
/**
 * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
 *
 * @param request the details of the token request
 * @param authorizationCode the oauth2 authorization code
 * @param redirectUrl the redirectUrl where the authorization code is sent to
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
                                                         URI redirectUrl) {
    AuthorizationCodeParameters.AuthorizationCodeParametersBuilder codeParameters =
        AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.getClaims() != null) {
        codeParameters.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
    }
    // A configured client secret means the code is redeemed by the confidential client;
    // otherwise the public client is used.
    Mono<IAuthenticationResult> tokenMono;
    if (clientSecret != null) {
        tokenMono = getConfidentialClientInstance(request).getValue()
            .flatMap(client -> Mono.fromFuture(() -> client.acquireToken(codeParameters.build())));
    } else {
        tokenMono = getPublicClientInstance(request).getValue()
            .flatMap(client -> Mono.fromFuture(() -> client.acquireToken(codeParameters.build())));
    }
    return tokenMono
        .onErrorMap(t -> new ClientAuthenticationException(
            "Failed to acquire token with authorization code", null, t))
        .map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}} must be
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
                                                          String redirectUrl, String loginHint) {
    URI redirectUri;
    String redirect;
    // Precedence: explicit port -> explicit redirect URL -> plain localhost.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
    // When the OS broker handles the default account, try a silent acquisition first and
    // quietly fall back to interactive on any failure.
    return getPublicClientInstance(request).getValue().flatMap(pc -> {
        if (options.isBrokerEnabled() && options.useDefaultBrokerAccount()) {
            return Mono.fromFuture(() ->
                acquireTokenFromPublicClientSilently(request, pc, null, false))
                .onErrorResume(e -> Mono.empty());
        } else {
            return Mono.empty();
        }
    })
        .switchIfEmpty(Mono.defer(() -> {
            InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
                buildInteractiveRequestParameters(request, loginHint, redirectUri);
            SynchronizedAccessor<PublicClientApplication> publicClient = getPublicClientInstance(request);
            return publicClient.getValue()
                .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build())));
        }))
        // NOTE(review): the mapper throws instead of returning the mapped exception; Reactor
        // still propagates it, but returning it would be the conventional onErrorMap usage.
        .onErrorMap(t -> !(t instanceof ClientAuthenticationException),
            t -> {
                throw new ClientAuthenticationException("Failed to acquire token with Interactive Browser Authentication.", null, t);
            })
        .map(MsalToken::new);
}
/**
 * Gets token from shared token cache.
 * <p>
 * Enumerates cached accounts, selects the single account matching the optional username,
 * then delegates to {@link #authenticateWithPublicClientCache(TokenRequestContext, IAccount)}.
 * */
public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
    SynchronizedAccessor<PublicClientApplication> publicClient = getPublicClientInstance(request);
    return publicClient.getValue()
        .flatMap(pc -> Mono.fromFuture(pc::getAccounts))
        .onErrorMap(t -> new CredentialUnavailableException(
            "Cannot get accounts from token cache. Error: " + t.getMessage(), t))
        .flatMap(set -> {
            IAccount requestedAccount;
            // De-duplicate by home account id; when a username filter is given, only matching
            // accounts are kept.
            Map<String, IAccount> accounts = new HashMap<>();
            if (set.isEmpty()) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("SharedTokenCacheCredential "
                        + "authentication unavailable. No accounts were found in the cache.")));
            }
            for (IAccount cached : set) {
                if (username == null || username.equals(cached.username())) {
                    accounts.putIfAbsent(cached.homeAccountId(), cached);
                }
            }
            // NOTE(review): the branches below raise RuntimeException while the empty-cache case
            // raises CredentialUnavailableException, even though the messages all say
            // "authentication unavailable" - confirm whether chained-credential fallthrough is
            // intended here before unifying.
            if (accounts.isEmpty()) {
                return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                    + "authentication unavailable. No account matching the specified username: %s was "
                    + "found in the cache.", username)));
            } else if (accounts.size() > 1) {
                if (username == null) {
                    return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication unavailable. "
                        + "Multiple accounts were found in the cache. Use username and tenant id to disambiguate.")
                    );
                } else {
                    return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                        + "authentication unavailable. Multiple accounts matching the specified username: "
                        + "%s were found in the cache.", username)));
                }
            } else {
                requestedAccount = accounts.values().iterator().next();
            }
            return authenticateWithPublicClientCache(request, requestedAccount);
        });
}
/**
 * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint.
 * <p>
 * Arc uses a challenge flow: an unauthenticated GET is expected to yield a 401 whose
 * WWW-Authenticate header points at a file containing the secret key; a second GET then
 * presents that key as a Basic authorization header.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
private Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint,
                                                                   TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        String payload = identityEndpoint + "?resource="
            + urlEncode(ScopeUtil.scopesToResource(request.getScopes()))
            + "&api-version=" + ARC_MANAGED_IDENTITY_ENDPOINT_API_VERSION;
        URL url = getUrl(payload);
        String secretKey = null;
        // First request: intentionally unauthenticated to trigger the 401 challenge.
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Metadata", "true");
            connection.setRequestProperty("User-Agent", userAgent);
            connection.connect();
        } catch (IOException e) {
            if (connection == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize "
                    + "Http URL connection to the endpoint.",
                    null, e));
            }
            int status = connection.getResponseCode();
            if (status != 401) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401"
                    + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status),
                    null, e));
            }
        } finally {
            // NOTE(review): this finally block does the actual challenge handling and
            // dereferences `connection`; if openConnection() failed (connection == null) the
            // ClientAuthenticationException thrown above is masked by an NPE here. Also,
            // `secretKey` is read from the file on the line above its null check, so the
            // null check appears unreachable. Confirm and restructure in a follow-up.
            String realm = connection.getHeaderField("WWW-Authenticate");
            if (realm == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            int separatorIndex = realm.indexOf("=");
            if (separatorIndex == -1) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            // The header value is "Basic realm=<path>": the path of a file holding the key.
            String secretKeyPath = realm.substring(separatorIndex + 1);
            secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8);
            if (connection != null) {
                connection.disconnect();
            }
            if (secretKey == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value"
                    + " in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
        }
        // Second request: present the secret key and parse the token response.
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Authorization", "Basic " + secretKey);
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
 * Asynchronously acquire a token by exchanging a client assertion (federated token) for an
 * Active Directory access token.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
    // The cached client assertion is fetched first, then exchanged synchronously inside fromCallable.
    return clientAssertionAccessor.getValue()
        .flatMap(assertionToken -> Mono.fromCallable(() -> authenticateWithExchangeTokenHelper(request, assertionToken)));
}
/**
* Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
*
* @param identityEndpoint the Identity endpoint to acquire token from
* @param identityHeader the identity header to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
private Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
String identityHeader,
String thumbprint,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
HttpsURLConnection connection = null;
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder(1024)
.append(identityEndpoint);
payload.append("?resource=");
payload.append(urlEncode(resource));
payload.append("&api-version=");
payload.append(SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION);
if (clientId != null) {
LOGGER.warning("User assigned managed identities are not supported in the Service Fabric environment.");
payload.append("&client_id=");
payload.append(urlEncode(clientId));
}
if (resourceId != null) {
LOGGER.warning("User assigned managed identities are not supported in the Service Fabric environment.");
payload.append("&mi_res_id=");
payload.append(urlEncode(resourceId));
}
try {
URL url = getUrl(payload.toString());
connection = (HttpsURLConnection) url.openConnection();
IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
connection.setRequestMethod("GET");
if (identityHeader != null) {
connection.setRequestProperty("Secret", identityHeader);
}
connection.setRequestProperty("Metadata", "true");
connection.setRequestProperty("User-Agent", userAgent);
connection.connect();
return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
SerializerEncoding.JSON);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 * <p>
 * Specifying identity parameters will use the 2019-08-01 endpoint version.
 * Specifying MSI parameters will use the 2017-09-01 endpoint version.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param msiEndpoint the MSI endpoint to acquire token from
 * @param msiSecret the MSI secret to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
                                                               String msiEndpoint, String msiSecret,
                                                               TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String endpoint;
        String headerValue;
        String endpointVersion;
        // Prefer the newer identity endpoint/header pair when present; otherwise fall back to
        // the legacy MSI endpoint/secret pair.
        if (identityEndpoint != null) {
            endpoint = identityEndpoint;
            headerValue = identityHeader;
            endpointVersion = IDENTITY_ENDPOINT_VERSION;
        } else {
            endpoint = msiEndpoint;
            headerValue = msiSecret;
            endpointVersion = MSI_ENDPOINT_VERSION;
        }
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        StringBuilder payload = new StringBuilder(1024)
            .append(endpoint);
        payload.append("?resource=");
        payload.append(urlEncode(resource));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            // The two endpoint versions spell the client-id query parameter differently.
            if (endpointVersion.equals(IDENTITY_ENDPOINT_VERSION)) {
                payload.append("&client_id=");
            } else {
                if (headerValue == null) {
                    LOGGER.warning("User assigned managed identities are not supported in the Cloud Shell environment.");
                }
                payload.append("&clientid=");
            }
            payload.append(urlEncode(clientId));
        }
        if (resourceId != null) {
            if (endpointVersion.equals(MSI_ENDPOINT_VERSION) && headerValue == null) {
                LOGGER.warning("User assigned managed identities are not supported in the Cloud Shell environment.");
            }
            payload.append("&mi_res_id=");
            payload.append(urlEncode(resourceId));
        }
        try {
            URL url = getUrl(payload.toString());
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                // The secret travels in a version-specific request header.
                if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
                    connection.setRequestProperty("X-IDENTITY-HEADER", headerValue);
                } else {
                    connection.setRequestProperty("Secret", headerValue);
                }
            }
            connection.setRequestProperty("Metadata", "true");
            connection.setRequestProperty("User-Agent", userAgent);
            connection.connect();
            return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
 * <p>
 * Retries transient response codes (404, 410, 429, 5xx) up to the configured maximum, with a
 * minimum back-off for 410 responses while IMDS upgrades.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // Per IMDS guidance, a 410 means the service is upgrading; wait at least 70s total.
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        payload.append("api-version=2018-02-01");
        payload.append("&resource=");
        payload.append(urlEncode(resource));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(urlEncode(clientId));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(urlEncode(resourceId));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
        + IdentityConstants.DEFAULT_IMDS_TOKENPATH;
    // Probe the endpoint first; the blocking retry loop runs inside fromCallable.
    return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                url = getUrl(endpoint + "?" + payload);
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.setRequestProperty("User-Agent", userAgent);
                connection.connect();
                return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                    SerializerEncoding.JSON);
            } catch (IOException exception) {
                if (connection == null) {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Could not connect to the url: " + url + ".", exception));
                }
                int responseCode;
                try {
                    responseCode = connection.getResponseCode();
                } catch (Exception e) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established, "
                                + e.getMessage() + ".", e));
                }
                // 400: request rejected outright - IMDS not usable in this environment.
                if (responseCode == 400) {
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established.", null));
                }
                if (responseCode == 403) {
                    if (connection.getResponseMessage()
                        .contains("A socket operation was attempted to an unreachable network")) {
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "Managed Identity response was not in the expected format."
                                    + " See the inner exception for details.",
                                new Exception(connection.getResponseMessage())));
                    }
                }
                // Transient codes: back off and retry until the budget is exhausted.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    int retryTimeoutInMs = getRetryTimeoutInMs(retry);
                    // 410 gets a floor of imdsUpgradeTimeInMs to ride out IMDS upgrades.
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
// Computes the back-off for the given retry attempt (1-based) by delegating to the
// retry-timeout function configured on the client options, converted to milliseconds.
int getRetryTimeoutInMs(int retry) {
    Duration backOff = options.getRetryTimeout().apply(Duration.ofSeconds(retry));
    return (int) backOff.toMillis();
}
/**
 * Blocks the current thread for the given number of milliseconds.
 *
 * @param millis the sleep duration in milliseconds
 * @throws IllegalStateException if the thread is interrupted while sleeping
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt status before rethrowing so callers up the stack
        // (thread pools, reactive schedulers) can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
// Maps azure-core ProxyOptions onto a java.net.Proxy: SOCKS4/SOCKS5 become SOCKS,
// everything else (including HTTP) falls back to an HTTP proxy, preserving the address.
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
    ProxyOptions.Type proxyType = options.getType();
    boolean isSocks = proxyType == ProxyOptions.Type.SOCKS4 || proxyType == ProxyOptions.Type.SOCKS5;
    return new Proxy(isSocks ? Type.SOCKS : Type.HTTP, options.getAddress());
}
/**
 * Opens the given URL in the platform's default browser. On unrecognized operating
 * systems the URL is logged so the user can open it manually.
 *
 * @param url the URL to open
 * @throws IOException if launching the browser process fails
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // Use the String[] overload of exec: the single-String form tokenizes on whitespace,
    // so a URL containing spaces or metacharacters would be split into extra arguments
    // (argument-injection hazard; exec(String) is also deprecated since Java 18).
    if (os.contains("win")) {
        rt.exec(new String[] {"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[] {"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] {"xdg-open", url});
    } else {
        LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
// Wraps the supplied exception in an already-completed-exceptionally CompletableFuture.
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) {
    CompletableFuture<IAuthenticationResult> failed = new CompletableFuture<>();
    failed.completeExceptionally(e);
    return failed;
}
/**
 * Returns the {@link IdentityClientOptions} this client was configured with.
 *
 * @return the configured client options
 */
public IdentityClientOptions getIdentityClientOptions() {
    return this.options;
}
// True when the configured tenant is the special ADFS tenant identifier.
private boolean isADFSTenant() {
    return ADFS_TENANT.equals(tenantId);
}
/**
 * Builds the MSAL app token provider used for workload identity: each invocation
 * translates the MSAL parameters into a {@link TokenRequestContext}, exchanges the
 * federated token, and adapts the resulting access token back into MSAL's shape.
 */
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
    return parameters -> {
        TokenRequestContext context = new TokenRequestContext()
            .setScopes(new ArrayList<>(parameters.scopes))
            .setClaims(parameters.claims)
            .setTenantId(parameters.tenantId);
        return authenticateWithExchangeToken(context)
            .map(token -> {
                TokenProviderResult providerResult = new TokenProviderResult();
                providerResult.setAccessToken(token.getToken());
                providerResult.setTenantId(context.getTenantId());
                providerResult.setExpiresInSeconds(token.getExpiresAt().toEpochSecond());
                return providerResult;
            })
            .toFuture();
    };
}
} |
No — this change simply aligns the timeout value across the language SDKs and is intended only for this stage. The risk is low because we are only increasing the value, and users saw no issues at the lower value either. It does not need to be configurable. | private Mono<Boolean> checkIMDSAvailable(String endpoint) {
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = getUrl(endpoint + "?api-version=2018-02-01");
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(1000);
connection.connect();
} catch (Exception e) {
throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException(
"ManagedIdentityCredential authentication unavailable. "
+ "Connection to IMDS endpoint cannot be established, "
+ e.getMessage() + ".", e));
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
} | connection.setConnectTimeout(1000); | private Mono<Boolean> checkIMDSAvailable(String endpoint) {
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
URL url = getUrl(endpoint + "?api-version=2018-02-01");
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setConnectTimeout(1000);
connection.connect();
} catch (Exception e) {
throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException(
"ManagedIdentityCredential authentication unavailable. "
+ "Connection to IMDS endpoint cannot be established, "
+ e.getMessage() + ".", e));
} finally {
if (connection != null) {
connection.disconnect();
}
}
return true;
});
} | class IdentityClient extends IdentityClientBase {
private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronizedAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronizedAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
private final SynchronizedAccessor<String> clientAssertionAccessor;
/**
 * Creates an IdentityClient with the given options.
 *
 * @param tenantId the tenant ID of the application.
 * @param clientId the client ID of the application.
 * @param clientSecret the client secret of the application.
 * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
 * @param clientAssertionFilePath the path to a file containing the client assertion token.
 * @param resourceId the resource ID of the application
 * @param clientAssertionSupplier the supplier that produces a client assertion token on demand.
 * @param certificate the PKCS12 or PEM certificate of the application.
 * @param certificatePassword the password protecting the PFX certificate.
 * @param isSharedTokenCacheCredential Indicate whether the credential is
 * {@link com.azure.identity.SharedTokenCacheCredential} or not.
 * @param clientAssertionTimeout the timeout to use for the client assertion.
 * @param options the options configuring the client.
 */
IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId,
clientAssertionSupplier, certificate, certificatePassword, isSharedTokenCacheCredential,
clientAssertionTimeout, options);
// MSAL client applications are built lazily (and cached) behind SynchronizedAccessors;
// separate accessors exist for the CAE (Continuous Access Evaluation) variants.
this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() ->
getPublicClientApplication(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronizedAccessor<>(() ->
getPublicClientApplication(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronizedAccessor<>(() -> getConfidentialClientApplication(true));
this.managedIdentityConfidentialClientApplicationAccessor =
new SynchronizedAccessor<>(this::getManagedIdentityConfidentialClientApplication);
this.workloadIdentityConfidentialClientApplicationAccessor =
new SynchronizedAccessor<>(this::getWorkloadIdentityConfidentialClientApplication);
// Client assertions read from file are cached for 5 minutes unless a custom timeout is supplied.
Duration cacheTimeout = (clientAssertionTimeout == null) ? Duration.ofMinutes(5) : clientAssertionTimeout;
this.clientAssertionAccessor = new SynchronizedAccessor<>(this::parseClientAssertion, cacheTimeout);
}
// Lazily constructs the confidential client on subscription; any RuntimeException thrown
// while building it is surfaced as an error signal rather than thrown at assembly time.
private Mono<ConfidentialClientApplication> getConfidentialClientApplication(boolean enableCae) {
    return Mono.fromCallable(() -> this.getConfidentialClient(enableCae));
}
// Lazily constructs the managed-identity confidential client on subscription; construction
// failures are emitted as error signals instead of being thrown at assembly time.
private Mono<ConfidentialClientApplication> getManagedIdentityConfidentialClientApplication() {
    return Mono.fromCallable(super::getManagedIdentityConfidentialClient);
}
// Lazily constructs the workload-identity confidential client on subscription; construction
// failures are emitted as error signals instead of being thrown at assembly time.
private Mono<ConfidentialClientApplication> getWorkloadIdentityConfidentialClientApplication() {
    return Mono.fromCallable(super::getWorkloadIdentityConfidentialClient);
}
/**
 * Routes a managed identity token request to the authentication path matching the
 * configured managed identity environment (App Service, Service Fabric, Arc, AKS, VM).
 */
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    ManagedIdentityParameters parameters = options.getManagedIdentityParameters();
    switch (options.getManagedIdentityType()) {
        case APP_SERVICE:
            // App Service exposes a local identity endpoint protected by a header secret.
            return authenticateToManagedIdentityEndpoint(parameters.getIdentityEndpoint(),
                parameters.getIdentityHeader(), parameters.getMsiEndpoint(), parameters.getMsiSecret(),
                tokenRequestContext);
        case SERVICE_FABRIC:
            return authenticateToServiceFabricManagedIdentityEndpoint(parameters.getIdentityEndpoint(),
                parameters.getIdentityHeader(), parameters.getIdentityServerThumbprint(), tokenRequestContext);
        case ARC:
            return authenticateToArcManagedIdentityEndpoint(parameters.getIdentityEndpoint(), tokenRequestContext);
        case AKS:
            // AKS workload identity exchanges a federated token for an AAD token.
            return authenticateWithExchangeToken(tokenRequestContext);
        case VM:
            return authenticateToIMDSEndpoint(tokenRequestContext);
        default:
            return Mono.error(LOGGER.logExceptionAsError(
                new CredentialUnavailableException("Unknown Managed Identity type, authentication not available.")));
    }
}
// Reads the client assertion token from the configured file path as UTF-8.
// Emits IllegalStateException when no file path was configured.
private Mono<String> parseClientAssertion() {
    return Mono.fromCallable(() -> {
        if (clientAssertionFilePath == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "Client Assertion File Path is not provided."
                    + " It should be provided to authenticate with client assertion."
            ));
        }
        byte[] encoded = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(encoded, StandardCharsets.UTF_8);
    });
}
// Lazily constructs the public client on subscription; any RuntimeException thrown
// while building it is surfaced as an error signal rather than thrown at assembly time.
private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential, boolean enableCae) {
    return Mono.fromCallable(() -> this.getPublicClient(sharedTokenCacheCredential, enableCae));
}
/**
 * Asynchronously acquires a token using credentials cached by the Azure Tools for IntelliJ plugin.
 * Tries, in order: a refresh token from the shared identity MSAL cache, then the plugin's own
 * auth-method record ("SP" = service principal, "DC" = device code).
 *
 * @param request the details of the token request
 * @return a Publisher that emits an MsalToken, or errors with CredentialUnavailableException
 * when no usable IntelliJ login state is found
 */
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
try {
IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
// Fast path: a refresh token already cached in the shared MSAL cache.
String cachedRefreshToken = cacheAccessor.getIntelliJCredentialsFromIdentityMsalCache();
if (!CoreUtils.isNullOrEmpty(cachedRefreshToken)) {
RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), cachedRefreshToken);
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
refreshTokenParametersBuilder.claims(customClaimRequest);
}
return publicClientApplicationAccessor.getValue()
.flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
.map(MsalToken::new));
}
IntelliJAuthMethodDetails authDetails;
try {
authDetails = cacheAccessor.getAuthDetailsIfAvailable();
} catch (CredentialUnavailableException e) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
}
if (authDetails == null) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("IntelliJ Authentication not available."
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."
+ " Fore more details refer to the troubleshooting guidelines here at"
+ " https:
}
String authType = authDetails.getAuthMethod();
if ("SP".equalsIgnoreCase(authType)) {
// Service-principal path: build a confidential client from the plugin's stored SP details.
Map<String, String> spDetails = cacheAccessor
.getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(spDetails.get("client"),
ClientCredentialFactory.createFromSecret(spDetails.get("key")))
.authority(authorityUrl)
.instanceDiscovery(options.isInstanceDiscoveryEnabled());
// Prefer the azure-core pipeline adapter when present; otherwise honor proxy options.
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build())).map(MsalToken::new);
} catch (MalformedURLException e) {
return Mono.error(e);
}
} else if ("DC".equalsIgnoreCase(authType)) {
LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
+ " for IntelliJ Plugin.");
// ADFS authorities cannot be used with the device-code refresh-token path.
if (isADFSTenant()) {
LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
+ " the ADFS tenants are not supported via IntelliJ Authentication currently.");
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("IntelliJCredential "
+ "authentication unavailable. ADFS tenant/authorities are not supported.")));
}
try {
JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
String refreshToken = intelliJCredentials.get("refreshToken").textValue();
RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
refreshTokenParametersBuilder.claims(customClaimRequest);
}
return publicClientApplicationAccessor.getValue()
.flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
.map(MsalToken::new));
} catch (CredentialUnavailableException e) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
}
} else {
LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
+ " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
+ " of those schemes from Azure Tools for IntelliJ plugin.");
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("IntelliJ Authentication not available."
+ " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
}
} catch (IOException e) {
return Mono.error(e);
}
}
/**
 * Asynchronously acquire a token from Active Directory with Azure CLI.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
    // Validate the scope before it is embedded into the shell command.
    String scopes = ScopeUtil.scopesToResource(request.getScopes());
    try {
        ScopeUtil.validateScope(scopes);
    } catch (IllegalArgumentException ex) {
        return Mono.error(LOGGER.logExceptionAsError(ex));
    }
    StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ")
        .append(scopes);
    try {
        // Tenant is validated as well, since it also ends up in the command line.
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azCommand.append(" --tenant ").append(tenant);
        }
    } catch (ClientAuthenticationException | IllegalArgumentException e) {
        return Mono.error(e);
    }
    try {
        return Mono.just(getTokenFromAzureCLIAuthentication(azCommand));
    } catch (RuntimeException e) {
        // CredentialUnavailable errors are logged at lower severity so chained credentials continue.
        return Mono.error(e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Asynchronously acquire a token from Active Directory with Azure Developer CLI.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    // Fail fast when no scope is supplied; `azd auth token` requires at least one --scope.
    if (scopes.isEmpty()) {
        return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request")));
    }
    // Validate every scope before it is embedded into the shell command.
    for (String scope : scopes) {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            return Mono.error(LOGGER.logExceptionAsError(ex));
        }
    }
    azdCommand.append(String.join(" --scope ", scopes));
    try {
        // Tenant is validated as well, since it also ends up in the command line.
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azdCommand.append(" --tenant-id ").append(tenant);
        }
    } catch (ClientAuthenticationException | IllegalArgumentException e) {
        return Mono.error(e);
    }
    try {
        AccessToken token = getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
        return Mono.just(token);
    } catch (RuntimeException e) {
        // CredentialUnavailable errors are logged at lower severity so chained credentials continue.
        return Mono.error(e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Asynchronously acquire a token from Active Directory with Azure PowerShell.
 * Tries pwsh first, then (on Windows) the legacy powershell executable; only when
 * every shell reports CredentialUnavailable are the failures combined and surfaced.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) {
ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER);
// Collects CredentialUnavailable failures from each shell attempt for the combined error.
List<CredentialUnavailableException> exceptions = new ArrayList<>(2);
PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows()
? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE);
// Legacy Windows PowerShell is only attempted on Windows.
PowershellManager legacyPowerShellManager = Platform.isWindows()
? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null;
List<PowershellManager> powershellManagers = new ArrayList<>(2);
powershellManagers.add(defaultPowerShellManager);
if (legacyPowerShellManager != null) {
powershellManagers.add(legacyPowerShellManager);
}
// Concurrency of 1 keeps the attempts sequential; .next() takes the first success.
return Flux.fromIterable(powershellManagers)
.flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager)
.onErrorResume(t -> {
if (!t.getClass().getSimpleName().equals("CredentialUnavailableException")) {
return Mono.error(new ClientAuthenticationException(
"Azure Powershell authentication failed. Error Details: " + t.getMessage()
+ ". To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
null, t));
}
exceptions.add((CredentialUnavailableException) t);
return Mono.empty();
}), 1)
.next()
.switchIfEmpty(Mono.defer(() -> {
// All shells failed with CredentialUnavailable: fold messages into a single exception.
CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
for (int z = exceptions.size() - 2; z >= 0; z--) {
CredentialUnavailableException current = exceptions.get(z);
last = new CredentialUnavailableException("Azure PowerShell authentication failed using default"
+ "powershell(pwsh) with following error: " + current.getMessage()
+ "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)"
+ " with following error: " + last.getMessage(),
last.getCause());
}
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last)));
}));
}
/**
 * Asynchronously acquire a token on behalf of a user via the On-Behalf-Of (OBO) flow,
 * using the user assertion carried on the request.
 *
 * @param request the details of the token request, including the user assertion
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) {
return getConfidentialClientInstance(request).getValue()
.flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(buildOBOFlowParameters(request)))
.map(MsalToken::new));
}
/**
 * Runs Get-AzAccessToken in the given PowerShell session and parses the JSON response.
 * Requires the Az.Accounts module (>= 2.2.0) and an existing Connect-AzAccount login;
 * either missing prerequisite surfaces as CredentialUnavailableException.
 *
 * @param request the details of the token request
 * @param powershellManager the PowerShell session to execute commands in (closed afterwards)
 * @return a Publisher that emits an AccessToken
 */
private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request,
PowershellManager powershellManager) {
String scope = ScopeUtil.scopesToResource(request.getScopes());
try {
ScopeUtil.validateScope(scope);
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
// Mono.using guarantees the PowerShell session is closed on completion, error, or cancel.
return Mono.using(() -> powershellManager, manager -> manager.initSession().flatMap(m -> {
String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru";
return m.runCommand(azAccountsCommand).flatMap(output -> {
// Module-not-found is detected from the shell's textual output.
if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded "
+ "because no valid module file")) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("Az.Account module with version >= 2.2.0 is not installed. "
+ "It needs to be installed to use Azure PowerShell "
+ "Credential.")));
}
LOGGER.verbose("Az.accounts module was found installed.");
String command = "Get-AzAccessToken -ResourceUrl '"
+ scope
+ "' | ConvertTo-Json";
LOGGER.verbose("Azure Powershell Authentication => Executing the command `{}` in Azure "
+ "Powershell to retrieve the Access Token.", command);
return m.runCommand(command).flatMap(out -> {
// No active login is likewise detected from the textual output.
if (out.contains("Run Connect-AzAccount to login")) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException(
"Run Connect-AzAccount to login to Azure account in PowerShell.")));
}
try {
LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the "
+ "received response from Azure Powershell.");
Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class,
SerializerEncoding.JSON);
String accessToken = objectMap.get("Token");
String time = objectMap.get("ExpiresOn");
// Normalize the expiry to UTC so downstream comparisons are consistent.
OffsetDateTime expiresOn = OffsetDateTime.parse(time).withOffsetSameInstant(ZoneOffset.UTC);
return Mono.just(new AccessToken(accessToken, expiresOn));
} catch (IOException e) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException(
"Encountered error when deserializing response from Azure Power Shell.", e)));
}
});
});
}), PowershellManager::close);
}
/**
 * Asynchronously acquire a token from Active Directory with a client secret.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
    return getConfidentialClientInstance(request).getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() ->
            confidentialClient.acquireToken(buildConfidentialClientParameters(request).build())))
        .map(MsalToken::new);
}
// Selects the CAE-enabled confidential client accessor when the request opts into
// Continuous Access Evaluation; otherwise the standard accessor.
private SynchronizedAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext requestContext) {
    if (requestContext.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
// Assembles the MSAL client-credential parameters for the request: scopes, resolved tenant,
// an optional client assertion, and CAE claims when present.
private ClientCredentialParameters.ClientCredentialParametersBuilder buildConfidentialClientParameters(TokenRequestContext request) {
    ClientCredentialParameters.ClientCredentialParametersBuilder parameters =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (clientAssertionSupplier != null) {
        parameters.clientCredential(
            ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()));
    }
    if (request.isCaeEnabled() && request.getClaims() != null) {
        parameters.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
    }
    return parameters;
}
// Acquires a token through the managed-identity confidential client; any failure is
// reported as CredentialUnavailableException so chained credentials can fall through.
public Mono<AccessToken> authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    return managedIdentityConfidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> confidentialClient.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil.resolveTenantId(tenantId, request, options))
                .build())))
        .onErrorMap(t -> new CredentialUnavailableException("Managed Identity authentication is not available.", t))
        .map(MsalToken::new);
}
/**
 * Acquires a token through the workload-identity confidential client; any failure is
 * reported as CredentialUnavailableException so chained credentials can fall through.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    return workloadIdentityConfidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil
                        .resolveTenantId(tenantId, request, options));
            return confidentialClient.acquireToken(builder.build());
        }
        // Previously said "Managed Identity", which is misleading when WorkloadIdentityCredential
        // is used standalone; name the actual credential path in the error.
        )).onErrorMap(t -> new CredentialUnavailableException("Workload Identity authentication is not available.", t))
        .map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from Active Directory with a username and a password
 * (resource-owner password credential flow).
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
String username, String password) {
return getPublicClientInstance(request).getValue()
.flatMap(pc -> Mono.fromFuture(() -> {
UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder
= buildUsernamePasswordFlowParameters(request, username, password);
return pc.acquireToken(userNamePasswordParametersBuilder.build());
}
// Any MSAL failure is wrapped in ClientAuthenticationException with troubleshooting guidance.
)).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and "
+ "password. To mitigate this issue, please refer to the troubleshooting guidelines "
+ "here at https:
null, t)).map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 * A silently-acquired token is used only if it is not within the refresh window
 * (REFRESH_OFFSET before expiry); otherwise a second silent attempt forces a refresh.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
return getPublicClientInstance(request).getValue()
.flatMap(pc -> Mono.fromFuture(() ->
acquireTokenFromPublicClientSilently(request, pc, account, false)
).map(MsalToken::new)
// Drop tokens that expire within REFRESH_OFFSET so a fresh one is fetched proactively.
.filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET)))
.switchIfEmpty(Mono.fromFuture(() ->
acquireTokenFromPublicClientSilently(request, pc, account, true)
).map(MsalToken::new))
);
}
// Attempts a silent token acquisition against the MSAL cache, optionally forcing a refresh.
// Claims on a CAE-enabled request always force a refresh, since a claims challenge
// invalidates whatever is cached.
private CompletableFuture<IAuthenticationResult> acquireTokenFromPublicClientSilently(TokenRequestContext request,
    PublicClientApplication pc,
    IAccount account,
    boolean forceRefresh
) {
    SilentParameters.SilentParametersBuilder builder =
        SilentParameters.builder(new HashSet<>(request.getScopes()));
    if (forceRefresh) {
        builder.forceRefresh(true);
    }
    if (request.isCaeEnabled() && request.getClaims() != null) {
        builder.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
        builder.forceRefresh(true);
    }
    if (account != null) {
        builder = builder.account(account);
    }
    builder.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        return pc.acquireTokenSilently(builder.build());
    } catch (MalformedURLException e) {
        return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
}
// Selects the CAE-enabled public client accessor when the request opts into
// Continuous Access Evaluation; otherwise the standard accessor.
private SynchronizedAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
    // Delegate to the account-aware overload with no account.
    return this.authenticateWithConfidentialClientCache(request, null);
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 * Cached results that fall within the refresh window (REFRESH_OFFSET before expiry)
 * are filtered out so callers re-acquire proactively.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 *
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request, IAccount account) {
    return getConfidentialClientInstance(request).getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            SilentParameters.SilentParametersBuilder parametersBuilder = SilentParameters.builder(
                new HashSet<>(request.getScopes()))
                .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
            if (account != null) {
                parametersBuilder.account(account);
            }
            try {
                return confidentialClient.acquireTokenSilently(parametersBuilder.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(MsalToken::new) // constructor reference instead of the redundant lambda `ar -> new MsalToken(ar)`
        .filter(t -> OffsetDateTime.now().isBefore(t.getExpiresAt().minus(REFRESH_OFFSET))));
}
/**
 * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
 * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
 * different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
 * code expires
 */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(() ->
                pc.acquireToken(buildDeviceCodeFlowParameters(request, deviceCodeConsumer).build()))
            .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code.", null, t))
            .map(MsalToken::new));
}
/**
 * Asynchronously acquire a token from Active Directory with the Visual Studio Code
 * cached refresh token.
 *
 * @param request the details of the token request
 * @param cloud the Azure cloud name used to look up the cached VS Code credentials
 * @return a Publisher that emits an AccessToken.
 */
public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) {
// ADFS authorities are not supported by the refresh-token path used here.
if (isADFSTenant()) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("VsCodeCredential "
+ "authentication unavailable. ADFS tenant/authorities are not supported. "
+ "To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
}
VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor();
String credential = null;
try {
// The refresh token is read from the "VS Code Azure" entry of the OS keychain/credential store.
credential = accessor.getCredentials("VS Code Azure", cloud);
} catch (CredentialUnavailableException e) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
}
RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters
.builder(new HashSet<>(request.getScopes()), credential);
if (request.isCaeEnabled() && request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
}
return getPublicClientInstance(request).getValue()
.flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build()))
.onErrorResume(t -> {
// An interaction-required failure means the cached token can no longer be redeemed silently.
if (t instanceof MsalInteractionRequiredException) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("Failed to acquire token with"
+ " VS code credential."
+ " To mitigate this issue, please refer to the troubleshooting guidelines here at "
+ "https:
}
return Mono.error(new ClientAuthenticationException("Failed to acquire token with"
+ " VS code credential", null, t));
})
.map(MsalToken::new)); }
/**
 * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
 *
 * @param request the details of the token request
 * @param authorizationCode the oauth2 authorization code
 * @param redirectUrl the redirectUrl where the authorization code is sent to
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
URI redirectUrl) {
AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parametersBuilder =
AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
.scopes(new HashSet<>(request.getScopes()))
.tenant(IdentityUtil
.resolveTenantId(tenantId, request, options));
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
parametersBuilder.claims(customClaimRequest);
}
Mono<IAuthenticationResult> acquireToken;
// A configured client secret implies a confidential client; otherwise redeem the code via the public client.
if (clientSecret != null) {
acquireToken = getConfidentialClientInstance(request).getValue()
.flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build())));
} else {
SynchronizedAccessor<PublicClientApplication> publicClient = getPublicClientInstance(request);
acquireToken = publicClient.getValue()
.flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(parametersBuilder.build())));
}
return acquireToken.onErrorMap(t -> new ClientAuthenticationException(
"Failed to acquire token with authorization code", null, t)).map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
 * credential will run a minimal local HttpServer at the given port, so {@code http:
 * listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
String redirectUrl, String loginHint) {
URI redirectUri;
String redirect;
// Redirect resolution precedence: explicit port > explicit redirect URL > localhost default.
if (port != null) {
redirect = HTTP_LOCALHOST + ":" + port;
} else if (redirectUrl != null) {
redirect = redirectUrl;
} else {
redirect = HTTP_LOCALHOST;
}
try {
redirectUri = new URI(redirect);
} catch (URISyntaxException e) {
return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e)));
}
return getPublicClientInstance(request).getValue().flatMap(pc -> {
// When a broker is enabled with the default account, try a silent broker acquisition first;
// any failure is swallowed so the flow falls through to the interactive browser path below.
if (options.isBrokerEnabled() && options.useDefaultBrokerAccount()) {
return Mono.fromFuture(() ->
acquireTokenFromPublicClientSilently(request, pc, null, false))
.onErrorResume(e -> Mono.empty());
} else {
return Mono.empty();
}
})
.switchIfEmpty(Mono.defer(() -> {
// Interactive path: open the browser and wait for the auth code on the local redirect URI.
InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
buildInteractiveRequestParameters(request, loginHint, redirectUri);
SynchronizedAccessor<PublicClientApplication> publicClient = getPublicClientInstance(request);
return publicClient.getValue()
.flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build())));
}))
.onErrorMap(t -> !(t instanceof ClientAuthenticationException),
t -> {
throw new ClientAuthenticationException("Failed to acquire token with Interactive Browser Authentication.", null, t);
})
.map(MsalToken::new);
}
/**
 * Asynchronously acquires a token using an account from the locally shared token cache.
 * <p>
 * Exactly one cached account must match the (optional) username; zero or multiple matches are errors.
 *
 * @param request the details of the token request
 * @param username the username used to select an account from the cache; may be null
 * @return a Publisher that emits an MsalToken for the single matching cached account
 */
public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
SynchronizedAccessor<PublicClientApplication> publicClient = getPublicClientInstance(request);
return publicClient.getValue()
.flatMap(pc -> Mono.fromFuture(pc::getAccounts))
.onErrorMap(t -> new CredentialUnavailableException(
"Cannot get accounts from token cache. Error: " + t.getMessage(), t))
.flatMap(set -> {
IAccount requestedAccount;
// De-duplicate accounts by home account id, keeping only those matching the username (if given).
Map<String, IAccount> accounts = new HashMap<>();
if (set.isEmpty()) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("SharedTokenCacheCredential "
+ "authentication unavailable. No accounts were found in the cache.")));
}
for (IAccount cached : set) {
if (username == null || username.equals(cached.username())) {
accounts.putIfAbsent(cached.homeAccountId(), cached);
}
}
// NOTE(review): the miss/ambiguity branches below throw plain RuntimeException while the
// empty-cache branch above uses CredentialUnavailableException — confirm this is intentional.
if (accounts.isEmpty()) {
return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
+ "authentication unavailable. No account matching the specified username: %s was "
+ "found in the cache.", username)));
} else if (accounts.size() > 1) {
if (username == null) {
return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication unavailable. "
+ "Multiple accounts were found in the cache. Use username and tenant id to disambiguate.")
);
} else {
return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
+ "authentication unavailable. Multiple accounts matching the specified username: "
+ "%s were found in the cache.", username)));
}
} else {
requestedAccount = accounts.values().iterator().next();
}
// Exactly one matching account — attempt a silent acquisition against the public client cache.
return authenticateWithPublicClientCache(request, requestedAccount);
});
}
/**
 * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint.
 * <p>
 * Arc uses a challenge/response scheme: an initial unauthenticated request is expected to fail with
 * 401 and return a WWW-Authenticate header whose value (after '=') is the path of a local file
 * containing a secret, which is then sent as a Basic authorization header on the real token request.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
private Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
HttpURLConnection connection = null;
String payload = identityEndpoint + "?resource="
+ urlEncode(ScopeUtil.scopesToResource(request.getScopes()))
+ "&api-version=" + ARC_MANAGED_IDENTITY_ENDPOINT_API_VERSION;
URL url = getUrl(payload);
String secretKey = null;
try {
// First request: deliberately unauthenticated so the endpoint issues the 401 challenge.
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.setRequestProperty("User-Agent", userAgent);
connection.connect();
} catch (IOException e) {
if (connection == null) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize "
+ "Http URL connection to the endpoint.",
null, e));
}
int status = connection.getResponseCode();
if (status != 401) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401"
+ " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status),
null, e));
}
} finally {
// NOTE(review): if connection is still null here (the catch above throws in that case), this
// dereference raises an NPE that masks the original exception — consider guarding.
String realm = connection.getHeaderField("WWW-Authenticate");
if (realm == null) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value"
+ " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
null));
}
int separatorIndex = realm.indexOf("=");
if (separatorIndex == -1) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value"
+ " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
null));
}
// Everything after '=' in the challenge is treated as the path to the local secret file.
String secretKeyPath = realm.substring(separatorIndex + 1);
secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8);
if (connection != null) {
connection.disconnect();
}
// NOTE(review): secretKey was just assigned a non-null String above, so this check is unreachable.
if (secretKey == null) {
throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a secret value"
+ " in the response from Azure Arc Managed Identity Endpoint",
null));
}
}
// Second request: present the secret read from the challenge file to obtain the token.
try {
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Authorization", "Basic " + secretKey);
connection.setRequestProperty("Metadata", "true");
connection.connect();
return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
SerializerEncoding.JSON);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
 * Asynchronously acquire a token by exchanging a client assertion (federated token) with
 * Active Directory. Used by the AKS/workload-identity flows — see
 * {@code getTokenFromTargetManagedIdentity} and {@code getWorkloadIdentityTokenProvider}.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
    // Resolve the (cached) client assertion first, then perform the exchange off the calling thread.
    return clientAssertionAccessor.getValue()
        .flatMap(assertion ->
            Mono.fromCallable(() -> authenticateWithExchangeTokenHelper(request, assertion)));
}
/**
 * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param thumbprint the certificate thumbprint trusted for the TLS connection to the endpoint
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
private Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
String identityHeader,
String thumbprint,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
HttpsURLConnection connection = null;
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder(1024)
.append(identityEndpoint);
payload.append("?resource=");
payload.append(urlEncode(resource));
payload.append("&api-version=");
payload.append(SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION);
// User-assigned identity parameters are still forwarded, but Service Fabric does not support
// them, so a warning is logged in either case.
if (clientId != null) {
LOGGER.warning("User assigned managed identities are not supported in the Service Fabric environment.");
payload.append("&client_id=");
payload.append(urlEncode(clientId));
}
if (resourceId != null) {
LOGGER.warning("User assigned managed identities are not supported in the Service Fabric environment.");
payload.append("&mi_res_id=");
payload.append(urlEncode(resourceId));
}
try {
URL url = getUrl(payload.toString());
connection = (HttpsURLConnection) url.openConnection();
// Trust the endpoint certificate matching the given thumbprint (see IdentitySslUtil).
IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
connection.setRequestMethod("GET");
if (identityHeader != null) {
connection.setRequestProperty("Secret", identityHeader);
}
connection.setRequestProperty("Metadata", "true");
connection.setRequestProperty("User-Agent", userAgent);
connection.connect();
return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
SerializerEncoding.JSON);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 * <p>
 * Specifying identity parameters will use the 2019-08-01 endpoint version.
 * Specifying MSI parameters will use the 2017-09-01 endpoint version.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param msiEndpoint the MSI endpoint to acquire token from
 * @param msiSecret the MSI secret to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
String msiEndpoint, String msiSecret,
TokenRequestContext request) {
return Mono.fromCallable(() -> {
String endpoint;
String headerValue;
String endpointVersion;
// Prefer the newer identity endpoint/header pair; fall back to the legacy MSI pair.
if (identityEndpoint != null) {
endpoint = identityEndpoint;
headerValue = identityHeader;
endpointVersion = IDENTITY_ENDPOINT_VERSION;
} else {
endpoint = msiEndpoint;
headerValue = msiSecret;
endpointVersion = MSI_ENDPOINT_VERSION;
}
String resource = ScopeUtil.scopesToResource(request.getScopes());
HttpURLConnection connection = null;
StringBuilder payload = new StringBuilder(1024)
.append(endpoint);
payload.append("?resource=");
payload.append(urlEncode(resource));
payload.append("&api-version=");
payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
// The client-id query parameter name differs between the two endpoint versions.
if (clientId != null) {
if (endpointVersion.equals(IDENTITY_ENDPOINT_VERSION)) {
payload.append("&client_id=");
} else {
if (headerValue == null) {
LOGGER.warning("User assigned managed identities are not supported in the Cloud Shell environment.");
}
payload.append("&clientid=");
}
payload.append(urlEncode(clientId));
}
if (resourceId != null) {
if (endpointVersion.equals(MSI_ENDPOINT_VERSION) && headerValue == null) {
LOGGER.warning("User assigned managed identities are not supported in the Cloud Shell environment.");
}
payload.append("&mi_res_id=");
payload.append(urlEncode(resourceId));
}
try {
URL url = getUrl(payload.toString());
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
// The secret header name also differs between the two endpoint versions.
if (headerValue != null) {
if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
connection.setRequestProperty("X-IDENTITY-HEADER", headerValue);
} else {
connection.setRequestProperty("Secret", headerValue);
}
}
connection.setRequestProperty("Metadata", "true");
connection.setRequestProperty("User-Agent", userAgent);
connection.connect();
return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
SerializerEncoding.JSON);
} finally {
if (connection != null) {
connection.disconnect();
}
}
});
}
/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
 * <p>
 * Retries transient failures (404/410/429/5xx) up to {@code options.getMaxRetry()} times with the
 * configured backoff; a 410 waits at least 70 seconds to ride out an IMDS upgrade.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
String resource = ScopeUtil.scopesToResource(request.getScopes());
StringBuilder payload = new StringBuilder();
final int imdsUpgradeTimeInMs = 70 * 1000;
try {
payload.append("api-version=2018-02-01");
payload.append("&resource=");
payload.append(urlEncode(resource));
if (clientId != null) {
payload.append("&client_id=");
payload.append(urlEncode(clientId));
}
if (resourceId != null) {
payload.append("&mi_res_id=");
payload.append(urlEncode(resourceId));
}
} catch (IOException exception) {
return Mono.error(exception);
}
String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
+ IdentityConstants.DEFAULT_IMDS_TOKENPATH;
// Probe IMDS availability first, then attempt the token request with bounded retries.
return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
int retry = 1;
while (retry <= options.getMaxRetry()) {
URL url = null;
HttpURLConnection connection = null;
try {
url = getUrl(endpoint + "?" + payload);
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.setRequestProperty("Metadata", "true");
connection.setRequestProperty("User-Agent", userAgent);
connection.connect();
return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
SerializerEncoding.JSON);
} catch (IOException exception) {
if (connection == null) {
throw LOGGER.logExceptionAsError(new RuntimeException(
"Could not connect to the url: " + url + ".", exception));
}
int responseCode;
try {
responseCode = connection.getResponseCode();
} catch (Exception e) {
throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException(
"ManagedIdentityCredential authentication unavailable. "
+ "Connection to IMDS endpoint cannot be established, "
+ e.getMessage() + ".", e));
}
// 400: the endpoint rejected the request — managed identity is not available here.
if (responseCode == 400) {
throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException(
"ManagedIdentityCredential authentication unavailable. "
+ "Connection to IMDS endpoint cannot be established.", null));
}
// 403 with an unreachable-network message indicates the IMDS address cannot be reached at all.
if (responseCode == 403) {
if (connection.getResponseMessage()
.contains("A socket operation was attempted to an unreachable network")) {
throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException(
"Managed Identity response was not in the expected format."
+ " See the inner exception for details.",
new Exception(connection.getResponseMessage())));
}
}
// Transient codes: back off and retry; 410 enforces the minimum IMDS-upgrade wait.
if (responseCode == 410
|| responseCode == 429
|| responseCode == 404
|| (responseCode >= 500 && responseCode <= 599)) {
int retryTimeoutInMs = getRetryTimeoutInMs(retry);
retryTimeoutInMs =
(responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
: retryTimeoutInMs;
retry++;
if (retry > options.getMaxRetry()) {
break;
} else {
sleep(retryTimeoutInMs);
}
} else {
throw LOGGER.logExceptionAsError(new RuntimeException(
"Couldn't acquire access token from IMDS, verify your objectId, "
+ "clientId or msiResourceId", exception));
}
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
throw LOGGER.logExceptionAsError(new RuntimeException(
String.format("MSI: Failed to acquire tokens after retrying %s times",
options.getMaxRetry())));
}));
}
/**
 * Computes the backoff for the given retry attempt via the configured retry-timeout function.
 *
 * @param retry the 1-based retry attempt number
 * @return the backoff to wait, in milliseconds
 */
int getRetryTimeoutInMs(int retry) {
    Duration timeout = options.getRetryTimeout().apply(Duration.ofSeconds(retry));
    return (int) timeout.toMillis();
}
/**
 * Blocks the current thread for the given number of milliseconds.
 *
 * @param millis the time to sleep, in milliseconds
 * @throws IllegalStateException if the thread is interrupted while sleeping
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt status so callers up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/**
 * Converts azure-core {@code ProxyOptions} into a {@link java.net.Proxy}.
 * SOCKS4/SOCKS5 map to a SOCKS proxy; HTTP and any other type fall back to an HTTP proxy.
 *
 * @param options the proxy configuration to convert
 * @return the equivalent java.net Proxy
 */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
/**
 * Opens the given URL in the platform's default browser (Windows, macOS, or Linux).
 * On unrecognized platforms the URL is logged for the user to open manually.
 *
 * @param url the URL to open
 * @throws IOException if launching the browser process fails
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    // Use the String[] exec overload: the single-String overload is deprecated and would
    // re-tokenize the URL on whitespace.
    if (os.contains("win")) {
        rt.exec(new String[] {"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[] {"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] {"xdg-open", url});
    } else {
        LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
/**
 * Builds an already-failed {@link CompletableFuture} carrying the given exception.
 *
 * @param e the exception to fail the future with
 * @return a future completed exceptionally with {@code e}
 */
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) {
    CompletableFuture<IAuthenticationResult> failed = new CompletableFuture<>();
    failed.completeExceptionally(e);
    return failed;
}
/**
 * Returns the {@link IdentityClientOptions} this client was configured with.
 *
 * @return the client options.
 */
public IdentityClientOptions getIdentityClientOptions() {
    return this.options;
}
/**
 * Indicates whether the configured tenant is the dedicated ADFS tenant.
 *
 * @return true when the tenant id equals the ADFS tenant constant
 */
private boolean isADFSTenant() {
    String currentTenant = this.tenantId;
    return currentTenant != null && currentTenant.equals(ADFS_TENANT);
}
// Builds the MSAL app-token-provider callback used for workload identity: each invocation converts
// the MSAL parameters into a TokenRequestContext, performs a client-assertion token exchange, and
// adapts the resulting AccessToken into MSAL's TokenProviderResult.
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
return appTokenProviderParameters -> {
TokenRequestContext trc = new TokenRequestContext()
.setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
.setClaims(appTokenProviderParameters.claims)
.setTenantId(appTokenProviderParameters.tenantId);
Mono<AccessToken> accessTokenAsync = authenticateWithExchangeToken(trc);
return accessTokenAsync.map(accessToken -> {
TokenProviderResult result = new TokenProviderResult();
result.setAccessToken(accessToken.getToken());
result.setTenantId(trc.getTenantId());
// MSAL expects the expiry as epoch seconds.
result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
return result;
}).toFuture();
};
}
} | class IdentityClient extends IdentityClientBase {
// MSAL client accessors; each SynchronizedAccessor is constructed with a supplier that builds the
// client (see the constructor). The "WithCae" variants are the Continuous Access Evaluation
// enabled counterparts.
private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessor;
private final SynchronizedAccessor<PublicClientApplication> publicClientApplicationAccessorWithCae;
private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessor;
private final SynchronizedAccessor<ConfidentialClientApplication> confidentialClientApplicationAccessorWithCae;
private final SynchronizedAccessor<ConfidentialClientApplication> managedIdentityConfidentialClientApplicationAccessor;
private final SynchronizedAccessor<ConfidentialClientApplication> workloadIdentityConfidentialClientApplicationAccessor;
// Accessor for the client assertion read from disk (see parseClientAssertion), constructed with a
// cache timeout in the constructor.
private final SynchronizedAccessor<String> clientAssertionAccessor;
/**
 * Creates an IdentityClient with the given options.
 *
 * @param tenantId the tenant ID of the application.
 * @param clientId the client ID of the application.
 * @param clientSecret the client secret of the application.
 * @param resourceId the resource ID of the application
 * @param certificatePath the path to the PKCS12 or PEM certificate of the application.
 * @param clientAssertionFilePath the path to a file containing the client assertion.
 * @param clientAssertionSupplier the supplier providing the client assertion.
 * @param certificate the PKCS12 or PEM certificate of the application.
 * @param certificatePassword the password protecting the PFX certificate.
 * @param isSharedTokenCacheCredential Indicate whether the credential is
 * {@link com.azure.identity.SharedTokenCacheCredential} or not.
 * @param clientAssertionTimeout the timeout to use for the client assertion.
 * @param options the options configuring the client.
 */
IdentityClient(String tenantId, String clientId, String clientSecret, String certificatePath,
String clientAssertionFilePath, String resourceId, Supplier<String> clientAssertionSupplier,
byte[] certificate, String certificatePassword, boolean isSharedTokenCacheCredential,
Duration clientAssertionTimeout, IdentityClientOptions options) {
super(tenantId, clientId, clientSecret, certificatePath, clientAssertionFilePath, resourceId,
clientAssertionSupplier, certificate, certificatePassword, isSharedTokenCacheCredential,
clientAssertionTimeout, options);
// Each accessor lazily builds its MSAL client; separate instances are kept for CAE-enabled requests.
this.publicClientApplicationAccessor = new SynchronizedAccessor<>(() ->
getPublicClientApplication(isSharedTokenCacheCredential, false));
this.publicClientApplicationAccessorWithCae = new SynchronizedAccessor<>(() ->
getPublicClientApplication(isSharedTokenCacheCredential, true));
this.confidentialClientApplicationAccessor = new SynchronizedAccessor<>(() -> getConfidentialClientApplication(false));
this.confidentialClientApplicationAccessorWithCae = new SynchronizedAccessor<>(() -> getConfidentialClientApplication(true));
this.managedIdentityConfidentialClientApplicationAccessor =
new SynchronizedAccessor<>(this::getManagedIdentityConfidentialClientApplication);
this.workloadIdentityConfidentialClientApplicationAccessor =
new SynchronizedAccessor<>(this::getWorkloadIdentityConfidentialClientApplication);
// The client assertion read from disk is cached for the configured timeout (default 5 minutes).
Duration cacheTimeout = (clientAssertionTimeout == null) ? Duration.ofMinutes(5) : clientAssertionTimeout;
this.clientAssertionAccessor = new SynchronizedAccessor<>(this::parseClientAssertion, cacheTimeout);
}
/**
 * Lazily builds the confidential client at subscription time, converting any construction
 * failure into an error signal.
 *
 * @param enableCae whether the client should be configured for Continuous Access Evaluation
 * @return a Mono emitting the constructed client or an error
 */
private Mono<ConfidentialClientApplication> getConfidentialClientApplication(boolean enableCae) {
    return Mono.defer(() -> {
        try {
            ConfidentialClientApplication client = this.getConfidentialClient(enableCae);
            return Mono.just(client);
        } catch (RuntimeException ex) {
            return Mono.error(ex);
        }
    });
}
/**
 * Lazily builds the managed-identity confidential client at subscription time, converting any
 * construction failure into an error signal.
 *
 * @return a Mono emitting the constructed client or an error
 */
private Mono<ConfidentialClientApplication> getManagedIdentityConfidentialClientApplication() {
    return Mono.defer(() -> {
        try {
            ConfidentialClientApplication client = super.getManagedIdentityConfidentialClient();
            return Mono.just(client);
        } catch (RuntimeException ex) {
            return Mono.error(ex);
        }
    });
}
/**
 * Lazily builds the workload-identity confidential client at subscription time, converting any
 * construction failure into an error signal.
 *
 * @return a Mono emitting the constructed client or an error
 */
private Mono<ConfidentialClientApplication> getWorkloadIdentityConfidentialClientApplication() {
    return Mono.defer(() -> {
        try {
            ConfidentialClientApplication client = super.getWorkloadIdentityConfidentialClient();
            return Mono.just(client);
        } catch (RuntimeException ex) {
            return Mono.error(ex);
        }
    });
}
/**
 * Dispatches the token request to the endpoint-specific flow for the configured managed
 * identity environment.
 *
 * @param tokenRequestContext the details of the token request
 * @return a Mono emitting the AccessToken, or an error for an unknown managed identity type
 */
@Override
Mono<AccessToken> getTokenFromTargetManagedIdentity(TokenRequestContext tokenRequestContext) {
    ManagedIdentityParameters miParams = options.getManagedIdentityParameters();
    ManagedIdentityType idType = options.getManagedIdentityType();
    switch (idType) {
        case APP_SERVICE:
            return authenticateToManagedIdentityEndpoint(miParams.getIdentityEndpoint(),
                miParams.getIdentityHeader(), miParams.getMsiEndpoint(), miParams.getMsiSecret(),
                tokenRequestContext);
        case SERVICE_FABRIC:
            return authenticateToServiceFabricManagedIdentityEndpoint(miParams.getIdentityEndpoint(),
                miParams.getIdentityHeader(), miParams.getIdentityServerThumbprint(), tokenRequestContext);
        case ARC:
            return authenticateToArcManagedIdentityEndpoint(miParams.getIdentityEndpoint(), tokenRequestContext);
        case AKS:
            // AKS uses workload-identity token exchange rather than an HTTP metadata endpoint.
            return authenticateWithExchangeToken(tokenRequestContext);
        case VM:
            return authenticateToIMDSEndpoint(tokenRequestContext);
        default:
            return Mono.error(LOGGER.logExceptionAsError(
                new CredentialUnavailableException("Unknown Managed Identity type, authentication not available.")));
    }
}
/**
 * Reads the client assertion from the configured file path as UTF-8 text.
 *
 * @return a Mono emitting the assertion contents, or an error when no file path is configured
 */
private Mono<String> parseClientAssertion() {
    return Mono.fromCallable(() -> {
        // Guard clause: a file path is mandatory for file-based client assertion authentication.
        if (clientAssertionFilePath == null) {
            throw LOGGER.logExceptionAsError(new IllegalStateException(
                "Client Assertion File Path is not provided."
                    + " It should be provided to authenticate with client assertion."
            ));
        }
        byte[] rawAssertion = Files.readAllBytes(Paths.get(clientAssertionFilePath));
        return new String(rawAssertion, StandardCharsets.UTF_8);
    });
}
/**
 * Lazily builds the public client at subscription time, converting any construction failure
 * into an error signal.
 *
 * @param sharedTokenCacheCredential whether the client backs a SharedTokenCacheCredential
 * @param enableCae whether the client should be configured for Continuous Access Evaluation
 * @return a Mono emitting the constructed client or an error
 */
private Mono<PublicClientApplication> getPublicClientApplication(boolean sharedTokenCacheCredential, boolean enableCae) {
    return Mono.defer(() -> {
        try {
            PublicClientApplication client = this.getPublicClient(sharedTokenCacheCredential, enableCae);
            return Mono.just(client);
        } catch (RuntimeException ex) {
            return Mono.error(ex);
        }
    });
}
/**
 * Asynchronously acquires a token using credentials cached by the Azure Tools for IntelliJ plugin.
 * <p>
 * Resolution order: (1) a refresh token cached in the shared identity MSAL cache, (2) the plugin's
 * own auth details — either a service principal ("SP") or device-code ("DC") login. Other schemes
 * are reported as unavailable.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an MsalToken
 */
public Mono<MsalToken> authenticateWithIntelliJ(TokenRequestContext request) {
try {
IntelliJCacheAccessor cacheAccessor = new IntelliJCacheAccessor(options.getIntelliJKeePassDatabasePath());
// Fast path: redeem a refresh token from the shared identity MSAL cache when present.
String cachedRefreshToken = cacheAccessor.getIntelliJCredentialsFromIdentityMsalCache();
if (!CoreUtils.isNullOrEmpty(cachedRefreshToken)) {
RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), cachedRefreshToken);
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
refreshTokenParametersBuilder.claims(customClaimRequest);
}
return publicClientApplicationAccessor.getValue()
.flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
.map(MsalToken::new));
}
IntelliJAuthMethodDetails authDetails;
try {
authDetails = cacheAccessor.getAuthDetailsIfAvailable();
} catch (CredentialUnavailableException e) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("IntelliJ Authentication not available.", e)));
}
if (authDetails == null) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("IntelliJ Authentication not available."
+ " Please log in with Azure Tools for IntelliJ plugin in the IDE."
+ " Fore more details refer to the troubleshooting guidelines here at"
+ " https:
}
String authType = authDetails.getAuthMethod();
if ("SP".equalsIgnoreCase(authType)) {
// Service-principal login: build a one-off confidential client from the cached SP details.
Map<String, String> spDetails = cacheAccessor
.getIntellijServicePrincipalDetails(authDetails.getCredFilePath());
String authorityUrl = spDetails.get("authURL") + spDetails.get("tenant");
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(spDetails.get("client"),
ClientCredentialFactory.createFromSecret(spDetails.get("key")))
.authority(authorityUrl)
.instanceDiscovery(options.isInstanceDiscoveryEnabled());
if (httpPipelineAdapter != null) {
applicationBuilder.httpClient(httpPipelineAdapter);
} else if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getExecutorService() != null) {
applicationBuilder.executorService(options.getExecutorService());
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build())).map(MsalToken::new);
} catch (MalformedURLException e) {
return Mono.error(e);
}
} else if ("DC".equalsIgnoreCase(authType)) {
// Device-code login: redeem the refresh token stored by the plugin.
LOGGER.verbose("IntelliJ Authentication => Device Code Authentication scheme detected in Azure Tools"
+ " for IntelliJ Plugin.");
if (isADFSTenant()) {
LOGGER.verbose("IntelliJ Authentication => The input tenant is detected to be ADFS and"
+ " the ADFS tenants are not supported via IntelliJ Authentication currently.");
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("IntelliJCredential "
+ "authentication unavailable. ADFS tenant/authorities are not supported.")));
}
try {
JsonNode intelliJCredentials = cacheAccessor.getDeviceCodeCredentials();
String refreshToken = intelliJCredentials.get("refreshToken").textValue();
RefreshTokenParameters.RefreshTokenParametersBuilder refreshTokenParametersBuilder =
RefreshTokenParameters.builder(new HashSet<>(request.getScopes()), refreshToken);
if (request.getClaims() != null) {
ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
refreshTokenParametersBuilder.claims(customClaimRequest);
}
return publicClientApplicationAccessor.getValue()
.flatMap(pc -> Mono.fromFuture(pc.acquireToken(refreshTokenParametersBuilder.build()))
.map(MsalToken::new));
} catch (CredentialUnavailableException e) {
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
}
} else {
LOGGER.verbose("IntelliJ Authentication = > Only Service Principal and Device Code Authentication"
+ " schemes are currently supported via IntelliJ Credential currently. Please ensure you used one"
+ " of those schemes from Azure Tools for IntelliJ plugin.");
return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
new CredentialUnavailableException("IntelliJ Authentication not available."
+ " Please login with Azure Tools for IntelliJ plugin in the IDE.")));
}
} catch (IOException e) {
return Mono.error(e);
}
}
/**
 * Asynchronously acquire a token from Active Directory with Azure CLI.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzureCli(TokenRequestContext request) {
// Build the `az account get-access-token` command line for the requested resource.
StringBuilder azCommand = new StringBuilder("az account get-access-token --output json --resource ");
String scopes = ScopeUtil.scopesToResource(request.getScopes());
// Validate the scope before appending it to a command line — presumably this rejects characters
// unsafe for shell execution (see ScopeUtil.validateScope).
try {
ScopeUtil.validateScope(scopes);
} catch (IllegalArgumentException ex) {
return Mono.error(LOGGER.logExceptionAsError(ex));
}
azCommand.append(scopes);
try {
String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
azCommand.append(" --tenant ").append(tenant);
}
} catch (ClientAuthenticationException | IllegalArgumentException e) {
return Mono.error(e);
}
try {
// The CLI invocation itself is synchronous; CredentialUnavailableException gets the dedicated
// unavailable-logging path, other runtime failures are logged as errors.
AccessToken token = getTokenFromAzureCLIAuthentication(azCommand);
return Mono.just(token);
} catch (RuntimeException e) {
return Mono.error(e instanceof CredentialUnavailableException
? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
: LOGGER.logExceptionAsError(e));
}
}
/**
 * Asynchronously acquire a token from Active Directory with Azure Developer CLI.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzureDeveloperCli(TokenRequestContext request) {
    // Build the `azd auth token` command line for the requested scopes.
    StringBuilder azdCommand = new StringBuilder("azd auth token --output json --scope ");
    List<String> scopes = request.getScopes();
    if (scopes.isEmpty()) {
        return Mono.error(LOGGER.logExceptionAsError(new IllegalArgumentException("Missing scope in request")));
    }
    // Validate every scope before it is appended to a command line (see ScopeUtil.validateScope).
    for (String scope : scopes) {
        try {
            ScopeUtil.validateScope(scope);
        } catch (IllegalArgumentException ex) {
            return Mono.error(LOGGER.logExceptionAsError(ex));
        }
    }
    azdCommand.append(String.join(" --scope ", scopes));
    try {
        String tenant = IdentityUtil.resolveTenantId(tenantId, request, options);
        ValidationUtil.validateTenantIdCharacterRange(tenant, LOGGER);
        if (!CoreUtils.isNullOrEmpty(tenant) && !tenant.equals(IdentityUtil.DEFAULT_TENANT)) {
            azdCommand.append(" --tenant-id ").append(tenant);
        }
    } catch (ClientAuthenticationException | IllegalArgumentException e) {
        return Mono.error(e);
    }
    try {
        // The CLI invocation itself is synchronous; CredentialUnavailableException gets the dedicated
        // unavailable-logging path, other runtime failures are logged as errors.
        AccessToken token = getTokenFromAzureDeveloperCLIAuthentication(azdCommand);
        return Mono.just(token);
    } catch (RuntimeException e) {
        return Mono.error(e instanceof CredentialUnavailableException
            ? LoggingUtil.logCredentialUnavailableException(LOGGER, options, (CredentialUnavailableException) e)
            : LOGGER.logExceptionAsError(e));
    }
}
/**
 * Asynchronously acquire a token from Active Directory with Azure PowerShell.
 * <p>
 * Tries the default PowerShell executable first and, on Windows, also queues the
 * legacy Windows PowerShell executable as a fallback.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithAzurePowerShell(TokenRequestContext request) {
    ValidationUtil.validateTenantIdCharacterRange(tenantId, LOGGER);
    List<CredentialUnavailableException> exceptions = new ArrayList<>(2);
    PowershellManager defaultPowerShellManager = new PowershellManager(Platform.isWindows()
        ? DEFAULT_WINDOWS_PS_EXECUTABLE : DEFAULT_LINUX_PS_EXECUTABLE);
    PowershellManager legacyPowerShellManager = Platform.isWindows()
        ? new PowershellManager(LEGACY_WINDOWS_PS_EXECUTABLE) : null;
    List<PowershellManager> powershellManagers = new ArrayList<>(2);
    powershellManagers.add(defaultPowerShellManager);
    if (legacyPowerShellManager != null) {
        powershellManagers.add(legacyPowerShellManager);
    }
    return Flux.fromIterable(powershellManagers)
        .flatMap(powershellManager -> getAccessTokenFromPowerShell(request, powershellManager)
            .onErrorResume(t -> {
                // Use instanceof rather than a fragile simple-name comparison: the name
                // check misclassified subclasses and could ClassCastException on an
                // unrelated class that happened to share the simple name.
                if (!(t instanceof CredentialUnavailableException)) {
                    return Mono.error(new ClientAuthenticationException(
                        "Azure Powershell authentication failed. Error Details: " + t.getMessage()
                            + ". To mitigate this issue, please refer to the troubleshooting guidelines here at "
                            + "https:
                        null, t));
                }
                // Remember the "unavailable" failure and let the next manager try.
                exceptions.add((CredentialUnavailableException) t);
                return Mono.empty();
            }), 1)
        .next()
        .switchIfEmpty(Mono.defer(() -> {
            // Every manager failed: fold the individual messages into one exception.
            CredentialUnavailableException last = exceptions.get(exceptions.size() - 1);
            for (int z = exceptions.size() - 2; z >= 0; z--) {
                CredentialUnavailableException current = exceptions.get(z);
                last = new CredentialUnavailableException("Azure PowerShell authentication failed using default"
                    + "powershell(pwsh) with following error: " + current.getMessage()
                    + "\r\n" + "Azure PowerShell authentication failed using powershell-core(powershell)"
                    + " with following error: " + last.getMessage(),
                    last.getCause());
            }
            return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, (last)));
        }));
}
/**
 * Asynchronously acquire a token on behalf of another user (OBO flow) using the
 * configured confidential client.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithOBO(TokenRequestContext request) {
    SynchronizedAccessor<ConfidentialClientApplication> clientAccessor = getConfidentialClientInstance(request);
    return clientAccessor.getValue()
        .flatMap(confidentialClient ->
            Mono.fromFuture(() -> confidentialClient.acquireToken(buildOBOFlowParameters(request)))
                .map(MsalToken::new));
}
/**
 * Runs the Azure PowerShell {@code Get-AzAccessToken} cmdlet in the given PowerShell
 * session and parses the JSON result into an {@link AccessToken}.
 *
 * @param request the details of the token request
 * @param powershellManager the PowerShell session to execute commands in
 * @return a Publisher that emits an AccessToken, or errors with
 *     CredentialUnavailableException when Azure PowerShell is not usable
 */
private Mono<AccessToken> getAccessTokenFromPowerShell(TokenRequestContext request,
    PowershellManager powershellManager) {
    String scope = ScopeUtil.scopesToResource(request.getScopes());
    try {
        ScopeUtil.validateScope(scope);
    } catch (IllegalArgumentException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
    // Mono.using guarantees the PowerShell session is closed on every completion path.
    return Mono.using(() -> powershellManager, manager -> manager.initSession().flatMap(m -> {
        // The Az.Accounts module is required for Get-AzAccessToken.
        String azAccountsCommand = "Import-Module Az.Accounts -MinimumVersion 2.2.0 -PassThru";
        return m.runCommand(azAccountsCommand).flatMap(output -> {
            if (output.contains("The specified module 'Az.Accounts' with version '2.2.0' was not loaded "
                + "because no valid module file")) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("Az.Account module with version >= 2.2.0 is not installed. "
                        + "It needs to be installed to use Azure PowerShell "
                        + "Credential.")));
            }
            LOGGER.verbose("Az.accounts module was found installed.");
            String command = "Get-AzAccessToken -ResourceUrl '"
                + scope
                + "' | ConvertTo-Json";
            LOGGER.verbose("Azure Powershell Authentication => Executing the command `{}` in Azure "
                + "Powershell to retrieve the Access Token.", command);
            return m.runCommand(command).flatMap(out -> {
                // This marker means no user is signed in to Azure PowerShell.
                if (out.contains("Run Connect-AzAccount to login")) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "Run Connect-AzAccount to login to Azure account in PowerShell.")));
                }
                try {
                    LOGGER.verbose("Azure Powershell Authentication => Attempting to deserialize the "
                        + "received response from Azure Powershell.");
                    Map<String, String> objectMap = SERIALIZER_ADAPTER.deserialize(out, Map.class,
                        SerializerEncoding.JSON);
                    String accessToken = objectMap.get("Token");
                    String time = objectMap.get("ExpiresOn");
                    // Normalize the cmdlet's expiry timestamp to a UTC offset.
                    OffsetDateTime expiresOn = OffsetDateTime.parse(time).withOffsetSameInstant(ZoneOffset.UTC);
                    return Mono.just(new AccessToken(accessToken, expiresOn));
                } catch (IOException e) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "Encountered error when deserializing response from Azure Power Shell.", e)));
                }
            });
        });
    }), PowershellManager::close);
}
/**
 * Asynchronously acquire a token from Active Directory using the configured
 * confidential client (client secret / certificate / assertion).
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithConfidentialClient(TokenRequestContext request) {
    return getConfidentialClientInstance(request).getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() ->
            confidentialClient.acquireToken(buildConfidentialClientParameters(request).build())))
        .map(MsalToken::new);
}
/** Picks the confidential client accessor matching the request's CAE setting. */
private SynchronizedAccessor<ConfidentialClientApplication> getConfidentialClientInstance(TokenRequestContext requestContext) {
    if (requestContext.isCaeEnabled()) {
        return confidentialClientApplicationAccessorWithCae;
    }
    return confidentialClientApplicationAccessor;
}
/**
 * Builds the MSAL client-credential parameters for the given request, attaching a
 * client assertion when a supplier is configured and CAE claims when requested.
 */
private ClientCredentialParameters.ClientCredentialParametersBuilder buildConfidentialClientParameters(TokenRequestContext request) {
    ClientCredentialParameters.ClientCredentialParametersBuilder parameters =
        ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));

    if (clientAssertionSupplier != null) {
        parameters.clientCredential(
            ClientCredentialFactory.createFromClientAssertion(clientAssertionSupplier.get()));
    }
    if (request.isCaeEnabled() && request.getClaims() != null) {
        parameters.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
    }
    return parameters;
}
/**
 * Acquires a token through the managed identity confidential client; any failure
 * is surfaced as a CredentialUnavailableException.
 */
public Mono<AccessToken> authenticateWithManagedIdentityConfidentialClient(TokenRequestContext request) {
    return managedIdentityConfidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            String resolvedTenant = IdentityUtil.resolveTenantId(tenantId, request, options);
            ClientCredentialParameters parameters =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(resolvedTenant)
                    .build();
            return confidentialClient.acquireToken(parameters);
        }))
        .onErrorMap(t -> new CredentialUnavailableException("Managed Identity authentication is not available.", t))
        .map(MsalToken::new);
}
/**
 * Acquires a token through the workload identity confidential client; any failure
 * is surfaced as a CredentialUnavailableException.
 */
public Mono<AccessToken> authenticateWithWorkloadIdentityConfidentialClient(TokenRequestContext request) {
    return workloadIdentityConfidentialClientApplicationAccessor.getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            ClientCredentialParameters.ClientCredentialParametersBuilder builder =
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil
                        .resolveTenantId(tenantId, request, options));
            return confidentialClient.acquireToken(builder.build());
        }
        // NOTE(review): the message says "Managed Identity" although this is the workload
        // identity path — looks like a copy-paste from the method above; confirm intent
        // before changing the user-facing text.
        )).onErrorMap(t -> new CredentialUnavailableException("Managed Identity authentication is not available.", t))
        .map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from Active Directory with a username and a password
 * (resource-owner password credential flow).
 *
 * @param request the details of the token request
 * @param username the username of the user
 * @param password the password of the user
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
    String username, String password) {
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(() -> {
            UserNamePasswordParameters.UserNamePasswordParametersBuilder userNamePasswordParametersBuilder
                = buildUsernamePasswordFlowParameters(request, username, password);
            return pc.acquireToken(userNamePasswordParametersBuilder.build());
        }
        // Any MSAL failure is normalized to ClientAuthenticationException for callers.
        )).onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with username and "
            + "password. To mitigate this issue, please refer to the troubleshooting guidelines "
            + "here at https:
            null, t)).map(MsalToken::new);
}
/**
 * Silently acquires a token for the given cached account; when the cached token is
 * within the refresh offset of expiry, a forced refresh is attempted instead.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<MsalToken> authenticateWithPublicClientCache(TokenRequestContext request, IAccount account) {
    return getPublicClientInstance(request).getValue()
        .flatMap(publicClient -> {
            // First attempt: honor the cache as-is, but only accept tokens that are
            // not about to expire.
            Mono<MsalToken> cached = Mono.fromFuture(() ->
                    acquireTokenFromPublicClientSilently(request, publicClient, account, false))
                .map(MsalToken::new)
                .filter(token -> OffsetDateTime.now().isBefore(token.getExpiresAt().minus(REFRESH_OFFSET)));
            // Fallback: force a refresh (lazy — only subscribed when 'cached' is empty).
            Mono<MsalToken> refreshed = Mono.fromFuture(() ->
                    acquireTokenFromPublicClientSilently(request, publicClient, account, true))
                .map(MsalToken::new);
            return cached.switchIfEmpty(refreshed);
        });
}
/**
 * Builds silent-token parameters and asks MSAL for a cached token; CAE claims
 * force a refresh so that the new claims are honored.
 */
private CompletableFuture<IAuthenticationResult> acquireTokenFromPublicClientSilently(TokenRequestContext request,
    PublicClientApplication pc,
    IAccount account,
    boolean forceRefresh
) {
    SilentParameters.SilentParametersBuilder builder =
        SilentParameters.builder(new HashSet<>(request.getScopes()));
    if (forceRefresh) {
        builder.forceRefresh(true);
    }
    if (request.isCaeEnabled() && request.getClaims() != null) {
        builder.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
        builder.forceRefresh(true);
    }
    if (account != null) {
        builder = builder.account(account);
    }
    builder.tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    try {
        return pc.acquireTokenSilently(builder.build());
    } catch (MalformedURLException e) {
        // Surface as an already-failed future so callers stay on the async path.
        return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
}
/** Picks the public client accessor matching the request's CAE setting. */
private SynchronizedAccessor<PublicClientApplication> getPublicClientInstance(TokenRequestContext request) {
    if (request.isCaeEnabled()) {
        return publicClientApplicationAccessorWithCae;
    }
    return publicClientApplicationAccessor;
}
/**
 * Asynchronously acquire a token from the currently logged in client.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request) {
    // Delegate to the account-aware overload with no specific account.
    return authenticateWithConfidentialClientCache(request, null);
}
/**
 * Silently acquires a token from the confidential client cache, optionally scoped
 * to a specific account. Emits empty when the cached token is already within the
 * refresh offset of expiry, so callers can fall back to a fresh acquisition.
 *
 * @param request the details of the token request
 * @param account the account used to log in to acquire the last token
 * @return a Publisher that emits an AccessToken
 */
@SuppressWarnings("deprecation")
public Mono<AccessToken> authenticateWithConfidentialClientCache(TokenRequestContext request, IAccount account) {
    return getConfidentialClientInstance(request).getValue()
        .flatMap(confidentialClient -> Mono.fromFuture(() -> {
            SilentParameters.SilentParametersBuilder builder =
                SilentParameters.builder(new HashSet<>(request.getScopes()))
                    .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
            if (account != null) {
                builder.account(account);
            }
            try {
                return confidentialClient.acquireTokenSilently(builder.build());
            } catch (MalformedURLException e) {
                return getFailedCompletableFuture(LOGGER.logExceptionAsError(new RuntimeException(e)));
            }
        }).map(MsalToken::new)
            // Reject tokens that are about to expire; the resulting empty Mono signals
            // the caller to refresh.
            .filter(token -> OffsetDateTime.now().isBefore(token.getExpiresAt().minus(REFRESH_OFFSET))));
}
/**
 * Asynchronously acquire a token from Active Directory with a device code challenge.
 * Active Directory will provide a device code for login and the user must meet the
 * challenge by authenticating in a browser on the current or a different device.
 *
 * @param request the details of the token request
 * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
 * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
 * code expires
 */
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
    Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    return getPublicClientInstance(request).getValue()
        .flatMap(publicClient -> Mono.fromFuture(() -> {
                DeviceCodeFlowParameters.DeviceCodeFlowParametersBuilder parameters =
                    buildDeviceCodeFlowParameters(request, deviceCodeConsumer);
                return publicClient.acquireToken(parameters.build());
            })
            .onErrorMap(t -> new ClientAuthenticationException("Failed to acquire token with device code.", null, t))
            .map(MsalToken::new));
}
/**
 * Asynchronously acquire a token from Active Directory with the Visual Studio Code
 * extension's cached refresh token.
 *
 * @param request the details of the token request
 * @param cloud the Azure cloud name used to look up the cached VS Code credentials
 * @return a Publisher that emits an AccessToken.
 */
public Mono<MsalToken> authenticateWithVsCodeCredential(TokenRequestContext request, String cloud) {
    // ADFS authorities are not supported by this flow.
    if (isADFSTenant()) {
        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
            new CredentialUnavailableException("VsCodeCredential "
                + "authentication unavailable. ADFS tenant/authorities are not supported. "
                + "To mitigate this issue, please refer to the troubleshooting guidelines here at "
                + "https:
    }
    VisualStudioCacheAccessor accessor = new VisualStudioCacheAccessor();
    // Refresh token persisted by the VS Code Azure extension for the given cloud.
    String credential = null;
    try {
        credential = accessor.getCredentials("VS Code Azure", cloud);
    } catch (CredentialUnavailableException e) {
        return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options, e));
    }
    RefreshTokenParameters.RefreshTokenParametersBuilder parametersBuilder = RefreshTokenParameters
        .builder(new HashSet<>(request.getScopes()), credential);
    if (request.isCaeEnabled() && request.getClaims() != null) {
        ClaimsRequest customClaimRequest = CustomClaimRequest.formatAsClaimsRequest(request.getClaims());
        parametersBuilder.claims(customClaimRequest);
    }
    return getPublicClientInstance(request).getValue()
        .flatMap(pc -> Mono.fromFuture(pc.acquireToken(parametersBuilder.build()))
            .onErrorResume(t -> {
                // An interaction-required failure means the cached refresh token is unusable.
                if (t instanceof MsalInteractionRequiredException) {
                    return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException("Failed to acquire token with"
                            + " VS code credential."
                            + " To mitigate this issue, please refer to the troubleshooting guidelines here at "
                            + "https:
                }
                return Mono.error(new ClientAuthenticationException("Failed to acquire token with"
                    + " VS code credential", null, t));
            })
            .map(MsalToken::new));
}
/**
 * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
 *
 * @param request the details of the token request
 * @param authorizationCode the oauth2 authorization code
 * @param redirectUrl the redirectUrl where the authorization code is sent to
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
    URI redirectUrl) {
    AuthorizationCodeParameters.AuthorizationCodeParametersBuilder parameters =
        AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .tenant(IdentityUtil.resolveTenantId(tenantId, request, options));
    if (request.getClaims() != null) {
        parameters.claims(CustomClaimRequest.formatAsClaimsRequest(request.getClaims()));
    }

    // A configured client secret means the code is redeemed as a confidential client;
    // otherwise the public client is used.
    Mono<IAuthenticationResult> tokenResult;
    if (clientSecret != null) {
        tokenResult = getConfidentialClientInstance(request).getValue()
            .flatMap(client -> Mono.fromFuture(() -> client.acquireToken(parameters.build())));
    } else {
        tokenResult = getPublicClientInstance(request).getValue()
            .flatMap(client -> Mono.fromFuture(() -> client.acquireToken(parameters.build())));
    }
    return tokenResult
        .onErrorMap(t -> new ClientAuthenticationException(
            "Failed to acquire token with authorization code", null, t))
        .map(MsalToken::new);
}
/**
 * Asynchronously acquire a token from Active Directory by opening a browser and waiting for the
 * user to login. The credential will run a minimal local HttpServer at the given port, so the
 * localhost redirect URI must be listed as a valid reply URL for the application.
 *
 * @param request the details of the token request
 * @param port the port on which the HTTP server is listening
 * @param redirectUrl the redirect URL to listen on and receive security code
 * @param loginHint the username suggestion to pre-fill the login page's username/email address field
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, Integer port,
    String redirectUrl, String loginHint) {
    URI redirectUri;
    String redirect;
    // Precedence: explicit port, then explicit redirect URL, then plain localhost.
    if (port != null) {
        redirect = HTTP_LOCALHOST + ":" + port;
    } else if (redirectUrl != null) {
        redirect = redirectUrl;
    } else {
        redirect = HTTP_LOCALHOST;
    }
    try {
        redirectUri = new URI(redirect);
    } catch (URISyntaxException e) {
        return Mono.error(LOGGER.logExceptionAsError(new RuntimeException(e)));
    }
    return getPublicClientInstance(request).getValue().flatMap(pc -> {
        // With a broker and the default broker account enabled, first try a silent
        // acquisition; any error falls through (empty) to the interactive flow below.
        if (options.isBrokerEnabled() && options.useDefaultBrokerAccount()) {
            return Mono.fromFuture(() ->
                acquireTokenFromPublicClientSilently(request, pc, null, false))
                .onErrorResume(e -> Mono.empty());
        } else {
            return Mono.empty();
        }
    })
        .switchIfEmpty(Mono.defer(() -> {
            // Interactive browser prompt.
            InteractiveRequestParameters.InteractiveRequestParametersBuilder builder =
                buildInteractiveRequestParameters(request, loginHint, redirectUri);
            SynchronizedAccessor<PublicClientApplication> publicClient = getPublicClientInstance(request);
            return publicClient.getValue()
                .flatMap(pc -> Mono.fromFuture(() -> pc.acquireToken(builder.build())));
        }))
        // Wrap anything that is not already a ClientAuthenticationException.
        .onErrorMap(t -> !(t instanceof ClientAuthenticationException),
            t -> {
                throw new ClientAuthenticationException("Failed to acquire token with Interactive Browser Authentication.", null, t);
            })
        .map(MsalToken::new);
}
/**
 * Gets a token from the shared (persisted) token cache, optionally filtered to a
 * single username; fails when zero or multiple matching accounts exist.
 *
 * @param request the details of the token request
 * @param username the username to select an account by, or null for any account
 * @return a Publisher that emits an AccessToken
 */
public Mono<MsalToken> authenticateWithSharedTokenCache(TokenRequestContext request, String username) {
    SynchronizedAccessor<PublicClientApplication> publicClient = getPublicClientInstance(request);
    return publicClient.getValue()
        .flatMap(pc -> Mono.fromFuture(pc::getAccounts))
        .onErrorMap(t -> new CredentialUnavailableException(
            "Cannot get accounts from token cache. Error: " + t.getMessage(), t))
        .flatMap(set -> {
            IAccount requestedAccount;
            // Deduplicate by home account id while applying the username filter.
            Map<String, IAccount> accounts = new HashMap<>();
            if (set.isEmpty()) {
                return Mono.error(LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                    new CredentialUnavailableException("SharedTokenCacheCredential "
                        + "authentication unavailable. No accounts were found in the cache.")));
            }
            for (IAccount cached : set) {
                if (username == null || username.equals(cached.username())) {
                    accounts.putIfAbsent(cached.homeAccountId(), cached);
                }
            }
            if (accounts.isEmpty()) {
                // No cached account matches the requested username.
                return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                    + "authentication unavailable. No account matching the specified username: %s was "
                    + "found in the cache.", username)));
            } else if (accounts.size() > 1) {
                // Ambiguous cache contents: an account cannot be picked automatically.
                if (username == null) {
                    return Mono.error(new RuntimeException("SharedTokenCacheCredential authentication unavailable. "
                        + "Multiple accounts were found in the cache. Use username and tenant id to disambiguate.")
                    );
                } else {
                    return Mono.error(new RuntimeException(String.format("SharedTokenCacheCredential "
                        + "authentication unavailable. Multiple accounts matching the specified username: "
                        + "%s were found in the cache.", username)));
                }
            } else {
                requestedAccount = accounts.values().iterator().next();
            }
            // Silently redeem a token for the single matching account.
            return authenticateWithPublicClientCache(request, requestedAccount);
        });
}
/**
 * Asynchronously acquire a token from the Azure Arc Managed Service Identity endpoint.
 * <p>
 * Azure Arc uses a challenge scheme: an unauthenticated GET is expected to fail with
 * 401 and a {@code WWW-Authenticate} header pointing at a file that contains the
 * secret; the request is then repeated with that secret as a Basic credential.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
private Mono<AccessToken> authenticateToArcManagedIdentityEndpoint(String identityEndpoint,
    TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String payload = identityEndpoint + "?resource="
            + urlEncode(ScopeUtil.scopesToResource(request.getScopes()))
            + "&api-version=" + ARC_MANAGED_IDENTITY_ENDPOINT_API_VERSION;
        URL url = getUrl(payload);

        // Challenge request: expected to be rejected with 401.
        HttpURLConnection connection = null;
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Metadata", "true");
            connection.setRequestProperty("User-Agent", userAgent);
            connection.connect();
        } catch (IOException e) {
            if (connection == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Failed to initialize "
                    + "Http URL connection to the endpoint.",
                    null, e));
            }
            int status = connection.getResponseCode();
            if (status != 401) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException(String.format("Expected a 401"
                    + " Unauthorized response from Azure Arc Managed Identity Endpoint, received: %d", status),
                    null, e));
            }
        }

        // This challenge handling previously ran in a finally block, which dereferenced
        // 'connection' even when it was null and could mask the original exception.
        // It now runs only once the challenge request completed as expected.
        String secretKey;
        try {
            String realm = connection.getHeaderField("WWW-Authenticate");
            if (realm == null) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            int separatorIndex = realm.indexOf("=");
            if (separatorIndex == -1) {
                throw LOGGER.logExceptionAsError(new ClientAuthenticationException("Did not receive a correct value"
                    + " for WWW-Authenticate header in the response from Azure Arc Managed Identity Endpoint",
                    null));
            }
            // The header value points at a local file holding the shared secret.
            String secretKeyPath = realm.substring(separatorIndex + 1);
            secretKey = new String(Files.readAllBytes(Paths.get(secretKeyPath)), StandardCharsets.UTF_8);
        } finally {
            connection.disconnect();
        }

        // Authenticated request using the secret obtained from the challenge.
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            connection.setRequestProperty("Authorization", "Basic " + secretKey);
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                SerializerEncoding.JSON);
        } finally {
            connection.disconnect();
        }
    });
}
/**
 * Asynchronously exchanges the configured client assertion for an access token
 * (federated token exchange flow).
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateWithExchangeToken(TokenRequestContext request) {
    return clientAssertionAccessor.getValue()
        .flatMap(assertionToken ->
            Mono.fromCallable(() -> authenticateWithExchangeTokenHelper(request, assertionToken)));
}
/**
 * Asynchronously acquire a token from the Azure Service Fabric Managed Service Identity endpoint.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param thumbprint the certificate thumbprint the endpoint's TLS certificate must match
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
private Mono<AccessToken> authenticateToServiceFabricManagedIdentityEndpoint(String identityEndpoint,
    String identityHeader,
    String thumbprint,
    TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        HttpsURLConnection connection = null;
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        StringBuilder payload = new StringBuilder(1024)
            .append(identityEndpoint);
        payload.append("?resource=");
        payload.append(urlEncode(resource));
        payload.append("&api-version=");
        payload.append(SERVICE_FABRIC_MANAGED_IDENTITY_API_VERSION);
        // User-assigned identity parameters are still sent, but Service Fabric does
        // not support them — hence the warnings.
        if (clientId != null) {
            LOGGER.warning("User assigned managed identities are not supported in the Service Fabric environment.");
            payload.append("&client_id=");
            payload.append(urlEncode(clientId));
        }
        if (resourceId != null) {
            LOGGER.warning("User assigned managed identities are not supported in the Service Fabric environment.");
            payload.append("&mi_res_id=");
            payload.append(urlEncode(resourceId));
        }
        try {
            URL url = getUrl(payload.toString());
            connection = (HttpsURLConnection) url.openConnection();
            // Pin the endpoint's TLS certificate to the provided thumbprint.
            IdentitySslUtil.addTrustedCertificateThumbprint(connection, thumbprint, LOGGER);
            connection.setRequestMethod("GET");
            if (identityHeader != null) {
                connection.setRequestProperty("Secret", identityHeader);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.setRequestProperty("User-Agent", userAgent);
            connection.connect();
            return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
 * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
 * <p>
 * Specifying identity parameters will use the 2019-08-01 endpoint version.
 * Specifying MSI parameters will use the 2017-09-01 endpoint version.
 *
 * @param identityEndpoint the Identity endpoint to acquire token from
 * @param identityHeader the identity header to acquire token with
 * @param msiEndpoint the MSI endpoint to acquire token from
 * @param msiSecret the MSI secret to acquire token with
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String identityEndpoint, String identityHeader,
    String msiEndpoint, String msiSecret,
    TokenRequestContext request) {
    return Mono.fromCallable(() -> {
        String endpoint;
        String headerValue;
        String endpointVersion;
        // Prefer the newer identity endpoint when present; otherwise fall back to MSI.
        if (identityEndpoint != null) {
            endpoint = identityEndpoint;
            headerValue = identityHeader;
            endpointVersion = IDENTITY_ENDPOINT_VERSION;
        } else {
            endpoint = msiEndpoint;
            headerValue = msiSecret;
            endpointVersion = MSI_ENDPOINT_VERSION;
        }
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        HttpURLConnection connection = null;
        StringBuilder payload = new StringBuilder(1024)
            .append(endpoint);
        payload.append("?resource=");
        payload.append(urlEncode(resource));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode(endpointVersion, StandardCharsets.UTF_8.name()));
        if (clientId != null) {
            // The client-id query parameter name differs between endpoint versions.
            if (endpointVersion.equals(IDENTITY_ENDPOINT_VERSION)) {
                payload.append("&client_id=");
            } else {
                if (headerValue == null) {
                    LOGGER.warning("User assigned managed identities are not supported in the Cloud Shell environment.");
                }
                payload.append("&clientid=");
            }
            payload.append(urlEncode(clientId));
        }
        if (resourceId != null) {
            if (endpointVersion.equals(MSI_ENDPOINT_VERSION) && headerValue == null) {
                LOGGER.warning("User assigned managed identities are not supported in the Cloud Shell environment.");
            }
            payload.append("&mi_res_id=");
            payload.append(urlEncode(resourceId));
        }
        try {
            URL url = getUrl(payload.toString());
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (headerValue != null) {
                // The secret header name also differs between endpoint versions.
                if (IDENTITY_ENDPOINT_VERSION.equals(endpointVersion)) {
                    connection.setRequestProperty("X-IDENTITY-HEADER", headerValue);
                } else {
                    connection.setRequestProperty("Secret", headerValue);
                }
            }
            connection.setRequestProperty("Metadata", "true");
            connection.setRequestProperty("User-Agent", userAgent);
            connection.connect();
            return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                SerializerEncoding.JSON);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
 * Asynchronously acquire a token from the Virtual Machine IMDS endpoint, retrying
 * transient failures up to the configured maximum number of attempts.
 *
 * @param request the details of the token request
 * @return a Publisher that emits an AccessToken
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // After a 410, IMDS may need up to 70 seconds to come back during an upgrade.
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        payload.append("api-version=2018-02-01");
        payload.append("&resource=");
        payload.append(urlEncode(resource));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(urlEncode(clientId));
        }
        if (resourceId != null) {
            payload.append("&mi_res_id=");
            payload.append(urlEncode(resourceId));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    String endpoint = TRAILING_FORWARD_SLASHES.matcher(options.getImdsAuthorityHost()).replaceAll("")
        + IdentityConstants.DEFAULT_IMDS_TOKENPATH;
    return checkIMDSAvailable(endpoint).flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                url = getUrl(endpoint + "?" + payload);
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                connection.setRequestProperty("Metadata", "true");
                connection.setRequestProperty("User-Agent", userAgent);
                connection.connect();
                return SERIALIZER_ADAPTER.deserialize(connection.getInputStream(), MSIToken.class,
                    SerializerEncoding.JSON);
            } catch (IOException exception) {
                if (connection == null) {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Could not connect to the url: " + url + ".", exception));
                }
                int responseCode;
                try {
                    responseCode = connection.getResponseCode();
                } catch (Exception e) {
                    // Could not even read a status: IMDS is unreachable.
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established, "
                                + e.getMessage() + ".", e));
                }
                if (responseCode == 400) {
                    // Bad request: identity not configured for this VM; not retryable.
                    throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                        new CredentialUnavailableException(
                            "ManagedIdentityCredential authentication unavailable. "
                                + "Connection to IMDS endpoint cannot be established.", null));
                }
                if (responseCode == 403) {
                    if (connection.getResponseMessage()
                        .contains("A socket operation was attempted to an unreachable network")) {
                        throw LoggingUtil.logCredentialUnavailableException(LOGGER, options,
                            new CredentialUnavailableException(
                                "Managed Identity response was not in the expected format."
                                    + " See the inner exception for details.",
                                new Exception(connection.getResponseMessage())));
                    }
                }
                // Transient statuses: back off and retry.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    int retryTimeoutInMs = getRetryTimeoutInMs(retry);
                    // A 410 during an IMDS upgrade needs the longer fixed wait.
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    throw LOGGER.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw LOGGER.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
int getRetryTimeoutInMs(int retry) {
return (int) options.getRetryTimeout()
.apply(Duration.ofSeconds(retry)).toMillis();
}
private static void sleep(int millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
throw new IllegalStateException(ex);
}
}
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
switch (options.getType()) {
case SOCKS4:
case SOCKS5:
return new Proxy(Type.SOCKS, options.getAddress());
case HTTP:
default:
return new Proxy(Type.HTTP, options.getAddress());
}
}
void openUrl(String url) throws IOException {
Runtime rt = Runtime.getRuntime();
String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
if (os.contains("win")) {
rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
} else if (os.contains("mac")) {
rt.exec("open " + url);
} else if (os.contains("nix") || os.contains("nux")) {
rt.exec("xdg-open " + url);
} else {
LOGGER.error("Browser could not be opened - please open {} in a browser on this device.", url);
}
}
private CompletableFuture<IAuthenticationResult> getFailedCompletableFuture(Exception e) {
CompletableFuture<IAuthenticationResult> completableFuture = new CompletableFuture<>();
completableFuture.completeExceptionally(e);
return completableFuture;
}
/**
* Get the configured identity client options.
*
* @return the client options.
*/
public IdentityClientOptions getIdentityClientOptions() {
return options;
}
private boolean isADFSTenant() {
return ADFS_TENANT.equals(this.tenantId);
}
Function<AppTokenProviderParameters, CompletableFuture<TokenProviderResult>> getWorkloadIdentityTokenProvider() {
return appTokenProviderParameters -> {
TokenRequestContext trc = new TokenRequestContext()
.setScopes(new ArrayList<>(appTokenProviderParameters.scopes))
.setClaims(appTokenProviderParameters.claims)
.setTenantId(appTokenProviderParameters.tenantId);
Mono<AccessToken> accessTokenAsync = authenticateWithExchangeToken(trc);
return accessTokenAsync.map(accessToken -> {
TokenProviderResult result = new TokenProviderResult();
result.setAccessToken(accessToken.getToken());
result.setTenantId(trc.getTenantId());
result.setExpiresInSeconds(accessToken.getExpiresAt().toEpochSecond());
return result;
}).toFuture();
};
}
} |
We shouldn't need to do this ideally. Appears to be a breaking change on msal. | private static IAuthenticationResult getMockIAuthenticationResult(String accessToken, OffsetDateTime expiresOn) {
return new IAuthenticationResult() {
@Override
public String accessToken() {
return accessToken;
}
@Override
public String idToken() {
return null;
}
@Override
public IAccount account() {
return new Account();
}
@Override
public ITenantProfile tenantProfile() {
return null;
}
@Override
public String environment() {
return "http:
}
@Override
public String scopes() {
return null;
}
@Override
public Date expiresOnDate() {
return Date.from(expiresOn.plusMinutes(2).toInstant());
}
@Override
public AuthenticationResultMetadata metadata() {
return null;
}
};
} | public AuthenticationResultMetadata metadata() { | private static IAuthenticationResult getMockIAuthenticationResult(String accessToken, OffsetDateTime expiresOn) {
return new IAuthenticationResult() {
@Override
public String accessToken() {
return accessToken;
}
@Override
public String idToken() {
return null;
}
@Override
public IAccount account() {
return new Account();
}
@Override
public ITenantProfile tenantProfile() {
return null;
}
@Override
public String environment() {
return "http:
}
@Override
public String scopes() {
return null;
}
@Override
public Date expiresOnDate() {
return Date.from(expiresOn.plusMinutes(2).toInstant());
}
@Override
public AuthenticationResultMetadata metadata() {
return null;
}
};
} | class TestUtils {
private static final ConfigurationSource EMPTY_SOURCE = source -> Collections.emptyMap();
/**
* Creates a mock {@link IAuthenticationResult} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a completable future of the result
*/
public static CompletableFuture<IAuthenticationResult> getMockAuthenticationResult(String accessToken, OffsetDateTime expiresOn) {
return CompletableFuture.completedFuture(getMockIAuthenticationResult(accessToken, expiresOn));
}
/**
* Creates a mock {@link MsalToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<MsalToken> getMockMsalToken(String accessToken, OffsetDateTime expiresOn) {
return Mono.fromFuture(getMockAuthenticationResult(accessToken, expiresOn))
.map(MsalToken::new);
}
/**
* Creates a mock {@link MsalToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static MsalToken getMockMsalTokenSync(String accessToken, OffsetDateTime expiresOn) {
return new MsalToken(getMockIAuthenticationResult(accessToken, expiresOn));
}
/**
* Creates a mock {@link IAccount} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<IAccount> getMockMsalAccount(String accessToken, OffsetDateTime expiresOn) {
return Mono.fromFuture(getMockAuthenticationResult(accessToken, expiresOn))
.map(IAuthenticationResult::account);
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<AccessToken> getMockAccessToken(String accessToken, OffsetDateTime expiresOn) {
return Mono.just(new AccessToken(accessToken, expiresOn.plusMinutes(2)));
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static AccessToken getMockAccessTokenSync(String accessToken, OffsetDateTime expiresOn) {
return new AccessToken(accessToken, expiresOn.plusMinutes(2));
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @param tokenRefreshOffset how long before the actual expiry to refresh the token
* @return a Mono publisher of the result
*/
public static Mono<AccessToken> getMockAccessToken(String accessToken, OffsetDateTime expiresOn, Duration tokenRefreshOffset) {
return Mono.just(new AccessToken(accessToken, expiresOn.plusMinutes(2).minus(tokenRefreshOffset)));
}
/**
* Creates a {@link Configuration} with the specified {@link ConfigurationSource} as the only source of
* configurations.
*
* @param configurationSource The configuration source.
* @return A configuration used for testing.
*/
public static Configuration createTestConfiguration(ConfigurationSource configurationSource) {
return new ConfigurationBuilder(EMPTY_SOURCE, EMPTY_SOURCE, configurationSource).build();
}
private TestUtils() {
}
static class Account implements IAccount {
static final long serialVersionUID = 1L;
@Override
public String homeAccountId() {
return UUID.randomUUID().toString();
}
@Override
public String environment() {
return "http:
}
@Override
public String username() {
return "testuser";
}
@Override
public Map<String, ITenantProfile> getTenantProfiles() {
return null;
}
}
} | class TestUtils {
private static final ConfigurationSource EMPTY_SOURCE = source -> Collections.emptyMap();
/**
* Creates a mock {@link IAuthenticationResult} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a completable future of the result
*/
public static CompletableFuture<IAuthenticationResult> getMockAuthenticationResult(String accessToken, OffsetDateTime expiresOn) {
return CompletableFuture.completedFuture(getMockIAuthenticationResult(accessToken, expiresOn));
}
/**
* Creates a mock {@link MsalToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<MsalToken> getMockMsalToken(String accessToken, OffsetDateTime expiresOn) {
return Mono.fromFuture(getMockAuthenticationResult(accessToken, expiresOn))
.map(MsalToken::new);
}
/**
* Creates a mock {@link MsalToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static MsalToken getMockMsalTokenSync(String accessToken, OffsetDateTime expiresOn) {
return new MsalToken(getMockIAuthenticationResult(accessToken, expiresOn));
}
/**
* Creates a mock {@link IAccount} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<IAccount> getMockMsalAccount(String accessToken, OffsetDateTime expiresOn) {
return Mono.fromFuture(getMockAuthenticationResult(accessToken, expiresOn))
.map(IAuthenticationResult::account);
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<AccessToken> getMockAccessToken(String accessToken, OffsetDateTime expiresOn) {
return Mono.just(new AccessToken(accessToken, expiresOn.plusMinutes(2)));
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static AccessToken getMockAccessTokenSync(String accessToken, OffsetDateTime expiresOn) {
return new AccessToken(accessToken, expiresOn.plusMinutes(2));
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @param tokenRefreshOffset how long before the actual expiry to refresh the token
* @return a Mono publisher of the result
*/
public static Mono<AccessToken> getMockAccessToken(String accessToken, OffsetDateTime expiresOn, Duration tokenRefreshOffset) {
return Mono.just(new AccessToken(accessToken, expiresOn.plusMinutes(2).minus(tokenRefreshOffset)));
}
/**
* Creates a {@link Configuration} with the specified {@link ConfigurationSource} as the only source of
* configurations.
*
* @param configurationSource The configuration source.
* @return A configuration used for testing.
*/
public static Configuration createTestConfiguration(ConfigurationSource configurationSource) {
return new ConfigurationBuilder(EMPTY_SOURCE, EMPTY_SOURCE, configurationSource).build();
}
private TestUtils() {
}
static class Account implements IAccount {
static final long serialVersionUID = 1L;
@Override
public String homeAccountId() {
return UUID.randomUUID().toString();
}
@Override
public String environment() {
return "http:
}
@Override
public String username() {
return "testuser";
}
@Override
public Map<String, ITenantProfile> getTenantProfiles() {
return null;
}
}
} |
reported it to them. Leaving this for now as it is just in test code anyway. | private static IAuthenticationResult getMockIAuthenticationResult(String accessToken, OffsetDateTime expiresOn) {
return new IAuthenticationResult() {
@Override
public String accessToken() {
return accessToken;
}
@Override
public String idToken() {
return null;
}
@Override
public IAccount account() {
return new Account();
}
@Override
public ITenantProfile tenantProfile() {
return null;
}
@Override
public String environment() {
return "http:
}
@Override
public String scopes() {
return null;
}
@Override
public Date expiresOnDate() {
return Date.from(expiresOn.plusMinutes(2).toInstant());
}
@Override
public AuthenticationResultMetadata metadata() {
return null;
}
};
} | public AuthenticationResultMetadata metadata() { | private static IAuthenticationResult getMockIAuthenticationResult(String accessToken, OffsetDateTime expiresOn) {
return new IAuthenticationResult() {
@Override
public String accessToken() {
return accessToken;
}
@Override
public String idToken() {
return null;
}
@Override
public IAccount account() {
return new Account();
}
@Override
public ITenantProfile tenantProfile() {
return null;
}
@Override
public String environment() {
return "http:
}
@Override
public String scopes() {
return null;
}
@Override
public Date expiresOnDate() {
return Date.from(expiresOn.plusMinutes(2).toInstant());
}
@Override
public AuthenticationResultMetadata metadata() {
return null;
}
};
} | class TestUtils {
private static final ConfigurationSource EMPTY_SOURCE = source -> Collections.emptyMap();
/**
* Creates a mock {@link IAuthenticationResult} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a completable future of the result
*/
public static CompletableFuture<IAuthenticationResult> getMockAuthenticationResult(String accessToken, OffsetDateTime expiresOn) {
return CompletableFuture.completedFuture(getMockIAuthenticationResult(accessToken, expiresOn));
}
/**
* Creates a mock {@link MsalToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<MsalToken> getMockMsalToken(String accessToken, OffsetDateTime expiresOn) {
return Mono.fromFuture(getMockAuthenticationResult(accessToken, expiresOn))
.map(MsalToken::new);
}
/**
* Creates a mock {@link MsalToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static MsalToken getMockMsalTokenSync(String accessToken, OffsetDateTime expiresOn) {
return new MsalToken(getMockIAuthenticationResult(accessToken, expiresOn));
}
/**
* Creates a mock {@link IAccount} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<IAccount> getMockMsalAccount(String accessToken, OffsetDateTime expiresOn) {
return Mono.fromFuture(getMockAuthenticationResult(accessToken, expiresOn))
.map(IAuthenticationResult::account);
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<AccessToken> getMockAccessToken(String accessToken, OffsetDateTime expiresOn) {
return Mono.just(new AccessToken(accessToken, expiresOn.plusMinutes(2)));
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static AccessToken getMockAccessTokenSync(String accessToken, OffsetDateTime expiresOn) {
return new AccessToken(accessToken, expiresOn.plusMinutes(2));
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @param tokenRefreshOffset how long before the actual expiry to refresh the token
* @return a Mono publisher of the result
*/
public static Mono<AccessToken> getMockAccessToken(String accessToken, OffsetDateTime expiresOn, Duration tokenRefreshOffset) {
return Mono.just(new AccessToken(accessToken, expiresOn.plusMinutes(2).minus(tokenRefreshOffset)));
}
/**
* Creates a {@link Configuration} with the specified {@link ConfigurationSource} as the only source of
* configurations.
*
* @param configurationSource The configuration source.
* @return A configuration used for testing.
*/
public static Configuration createTestConfiguration(ConfigurationSource configurationSource) {
return new ConfigurationBuilder(EMPTY_SOURCE, EMPTY_SOURCE, configurationSource).build();
}
private TestUtils() {
}
static class Account implements IAccount {
static final long serialVersionUID = 1L;
@Override
public String homeAccountId() {
return UUID.randomUUID().toString();
}
@Override
public String environment() {
return "http:
}
@Override
public String username() {
return "testuser";
}
@Override
public Map<String, ITenantProfile> getTenantProfiles() {
return null;
}
}
} | class TestUtils {
private static final ConfigurationSource EMPTY_SOURCE = source -> Collections.emptyMap();
/**
* Creates a mock {@link IAuthenticationResult} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a completable future of the result
*/
public static CompletableFuture<IAuthenticationResult> getMockAuthenticationResult(String accessToken, OffsetDateTime expiresOn) {
return CompletableFuture.completedFuture(getMockIAuthenticationResult(accessToken, expiresOn));
}
/**
* Creates a mock {@link MsalToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<MsalToken> getMockMsalToken(String accessToken, OffsetDateTime expiresOn) {
return Mono.fromFuture(getMockAuthenticationResult(accessToken, expiresOn))
.map(MsalToken::new);
}
/**
* Creates a mock {@link MsalToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static MsalToken getMockMsalTokenSync(String accessToken, OffsetDateTime expiresOn) {
return new MsalToken(getMockIAuthenticationResult(accessToken, expiresOn));
}
/**
* Creates a mock {@link IAccount} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<IAccount> getMockMsalAccount(String accessToken, OffsetDateTime expiresOn) {
return Mono.fromFuture(getMockAuthenticationResult(accessToken, expiresOn))
.map(IAuthenticationResult::account);
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static Mono<AccessToken> getMockAccessToken(String accessToken, OffsetDateTime expiresOn) {
return Mono.just(new AccessToken(accessToken, expiresOn.plusMinutes(2)));
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @return a Mono publisher of the result
*/
public static AccessToken getMockAccessTokenSync(String accessToken, OffsetDateTime expiresOn) {
return new AccessToken(accessToken, expiresOn.plusMinutes(2));
}
/**
* Creates a mock {@link AccessToken} instance.
* @param accessToken the access token to return
* @param expiresOn the expiration time
* @param tokenRefreshOffset how long before the actual expiry to refresh the token
* @return a Mono publisher of the result
*/
public static Mono<AccessToken> getMockAccessToken(String accessToken, OffsetDateTime expiresOn, Duration tokenRefreshOffset) {
return Mono.just(new AccessToken(accessToken, expiresOn.plusMinutes(2).minus(tokenRefreshOffset)));
}
/**
* Creates a {@link Configuration} with the specified {@link ConfigurationSource} as the only source of
* configurations.
*
* @param configurationSource The configuration source.
* @return A configuration used for testing.
*/
public static Configuration createTestConfiguration(ConfigurationSource configurationSource) {
return new ConfigurationBuilder(EMPTY_SOURCE, EMPTY_SOURCE, configurationSource).build();
}
private TestUtils() {
}
static class Account implements IAccount {
static final long serialVersionUID = 1L;
@Override
public String homeAccountId() {
return UUID.randomUUID().toString();
}
@Override
public String environment() {
return "http:
}
@Override
public String username() {
return "testuser";
}
@Override
public Map<String, ITenantProfile> getTenantProfiles() {
return null;
}
}
} |
Should we add a log here? | public void connectFailed(URI uri, SocketAddress sa, IOException ioe) {
} | public void connectFailed(URI uri, SocketAddress sa, IOException ioe) {
} | class JdkHttpClientProxySelector extends ProxySelector {
private final Proxy.Type proxyType;
private final SocketAddress proxyAddress;
private final Pattern nonProxyHostsPattern;
/**
* Creates a new JdkHttpClientProxySelector.
*
* @param proxyType The type of proxy to use.
* @param proxyAddress The address of the proxy.
* @param nonProxyHosts The hosts that should not be proxied.
*/
public JdkHttpClientProxySelector(Proxy.Type proxyType, SocketAddress proxyAddress, String nonProxyHosts) {
this.proxyType = proxyType;
this.proxyAddress = proxyAddress;
this.nonProxyHostsPattern
= (nonProxyHosts == null) ? null : Pattern.compile(nonProxyHosts, Pattern.CASE_INSENSITIVE);
}
@Override
@SuppressWarnings("unchecked")
public List<Proxy> select(URI uri) {
/*
* If the host of the URI matches the nonProxyHostsPattern return no options for proxying, otherwise return the
* proxy.
*/
return (nonProxyHostsPattern == null || !nonProxyHostsPattern.matcher(uri.getHost()).matches())
? Collections.singletonList(new Proxy(proxyType, proxyAddress))
: Collections.EMPTY_LIST;
}
@Override
} | class JdkHttpClientProxySelector extends ProxySelector {
private final Proxy.Type proxyType;
private final SocketAddress proxyAddress;
private final Pattern nonProxyHostsPattern;
/**
* Creates a new JdkHttpClientProxySelector.
*
* @param proxyType The type of proxy to use.
* @param proxyAddress The address of the proxy.
* @param nonProxyHosts The hosts that should not be proxied.
*/
public JdkHttpClientProxySelector(Proxy.Type proxyType, SocketAddress proxyAddress, String nonProxyHosts) {
this.proxyType = proxyType;
this.proxyAddress = proxyAddress;
this.nonProxyHostsPattern
= (nonProxyHosts == null) ? null : Pattern.compile(nonProxyHosts, Pattern.CASE_INSENSITIVE);
}
@Override
@SuppressWarnings("unchecked")
public List<Proxy> select(URI uri) {
/*
* If the host of the URI matches the nonProxyHostsPattern return no options for proxying, otherwise return the
* proxy.
*/
return (nonProxyHostsPattern == null || !nonProxyHostsPattern.matcher(uri.getHost()).matches())
? Collections.singletonList(new Proxy(proxyType, proxyAddress))
: Collections.EMPTY_LIST;
}
@Override
} | |
Yeah, we could, though seems like all the implementations for this method no-op | public void connectFailed(URI uri, SocketAddress sa, IOException ioe) {
} | public void connectFailed(URI uri, SocketAddress sa, IOException ioe) {
} | class JdkHttpClientProxySelector extends ProxySelector {
private final Proxy.Type proxyType;
private final SocketAddress proxyAddress;
private final Pattern nonProxyHostsPattern;
/**
* Creates a new JdkHttpClientProxySelector.
*
* @param proxyType The type of proxy to use.
* @param proxyAddress The address of the proxy.
* @param nonProxyHosts The hosts that should not be proxied.
*/
public JdkHttpClientProxySelector(Proxy.Type proxyType, SocketAddress proxyAddress, String nonProxyHosts) {
this.proxyType = proxyType;
this.proxyAddress = proxyAddress;
this.nonProxyHostsPattern
= (nonProxyHosts == null) ? null : Pattern.compile(nonProxyHosts, Pattern.CASE_INSENSITIVE);
}
@Override
@SuppressWarnings("unchecked")
public List<Proxy> select(URI uri) {
/*
* If the host of the URI matches the nonProxyHostsPattern return no options for proxying, otherwise return the
* proxy.
*/
return (nonProxyHostsPattern == null || !nonProxyHostsPattern.matcher(uri.getHost()).matches())
? Collections.singletonList(new Proxy(proxyType, proxyAddress))
: Collections.EMPTY_LIST;
}
@Override
} | class JdkHttpClientProxySelector extends ProxySelector {
private final Proxy.Type proxyType;
private final SocketAddress proxyAddress;
private final Pattern nonProxyHostsPattern;
/**
* Creates a new JdkHttpClientProxySelector.
*
* @param proxyType The type of proxy to use.
* @param proxyAddress The address of the proxy.
* @param nonProxyHosts The hosts that should not be proxied.
*/
public JdkHttpClientProxySelector(Proxy.Type proxyType, SocketAddress proxyAddress, String nonProxyHosts) {
this.proxyType = proxyType;
this.proxyAddress = proxyAddress;
this.nonProxyHostsPattern
= (nonProxyHosts == null) ? null : Pattern.compile(nonProxyHosts, Pattern.CASE_INSENSITIVE);
}
@Override
@SuppressWarnings("unchecked")
public List<Proxy> select(URI uri) {
/*
* If the host of the URI matches the nonProxyHostsPattern return no options for proxying, otherwise return the
* proxy.
*/
return (nonProxyHostsPattern == null || !nonProxyHostsPattern.matcher(uri.getHost()).matches())
? Collections.singletonList(new Proxy(proxyType, proxyAddress))
: Collections.EMPTY_LIST;
}
@Override
} | |
```suggestion try (JsonReader reader = JsonProviders.createReader(visualization.toStream())) { ``` | public void testVisualization() {
String query = "datatable (s: string, i: long) [ \"a\", 1, \"b\", 2, \"c\", 3 ] "
+ "| render columnchart with (title=\"the chart title\", xtitle=\"the x axis title\")";
LogsQueryResult queryResults = client.queryWorkspaceWithResponse(workspaceId,
query, null, new LogsQueryOptions().setIncludeStatistics(true).setIncludeVisualization(true),
Context.NONE).getValue();
assertEquals(1, queryResults.getAllTables().size());
assertNotNull(queryResults.getVisualization());
BinaryData visualization = queryResults.getVisualization();
try ( JsonReader reader = JsonProviders.createReader(visualization.toStream());) {
Map<String, Object> map = reader.readMap(innerReader -> {
return reader.readUntyped();
});
String title = map.get("title").toString();
String xTitle = map.get("xTitle").toString();
assertEquals("the chart title", title);
assertEquals("the x axis title", xTitle);
} catch (IOException e) {
Assertions.fail("Failed to read the visualization data.");
}
LinkedHashMap<String, Object> linkedHashMap =
queryResults.getVisualization().toObject(new TypeReference<LinkedHashMap<String, Object>>() {
});
String title = linkedHashMap.get("title").toString();
String xTitle = linkedHashMap.get("xTitle").toString();
assertEquals("the chart title", title);
assertEquals("the x axis title", xTitle);
} | try ( JsonReader reader = JsonProviders.createReader(visualization.toStream());) { | public void testVisualization() {
String query = "datatable (s: string, i: long) [ \"a\", 1, \"b\", 2, \"c\", 3 ] "
+ "| render columnchart with (title=\"the chart title\", xtitle=\"the x axis title\")";
LogsQueryResult queryResults = client.queryWorkspaceWithResponse(workspaceId,
query, null, new LogsQueryOptions().setIncludeStatistics(true).setIncludeVisualization(true),
Context.NONE).getValue();
assertEquals(1, queryResults.getAllTables().size());
assertNotNull(queryResults.getVisualization());
BinaryData visualization = queryResults.getVisualization();
try (JsonReader reader = JsonProviders.createReader(visualization.toStream())) {
Map<String, Object> map = reader.readMap(innerReader -> {
return reader.readUntyped();
});
String title = map.get("title").toString();
String xTitle = map.get("xTitle").toString();
assertEquals("the chart title", title);
assertEquals("the x axis title", xTitle);
} catch (IOException e) {
Assertions.fail("Failed to read the visualization data.");
}
LinkedHashMap<String, Object> linkedHashMap =
queryResults.getVisualization().toObject(new TypeReference<LinkedHashMap<String, Object>>() {
});
String title = linkedHashMap.get("title").toString();
String xTitle = linkedHashMap.get("xTitle").toString();
assertEquals("the chart title", title);
assertEquals("the x axis title", xTitle);
} | class LogsQueryClientTest extends TestProxyTestBase {
private LogsQueryClient client;
private String workspaceId;
private String additionalWorkspaceId;
private String resourceId;
@BeforeEach
public void setup() {
workspaceId = getLogWorkspaceId(interceptorManager.isPlaybackMode());
additionalWorkspaceId = getAdditionalLogWorkspaceId(interceptorManager.isPlaybackMode());
resourceId = getLogResourceId(interceptorManager.isPlaybackMode());
LogsQueryClientBuilder clientBuilder = new LogsQueryClientBuilder()
.retryPolicy(new RetryPolicy(new RetryStrategy() {
@Override
public int getMaxRetries() {
return 0;
}
@Override
public Duration calculateRetryDelay(int i) {
return null;
}
}));
if (getTestMode() == TestMode.PLAYBACK) {
clientBuilder
.credential(request -> Mono.just(new AccessToken("fakeToken", OffsetDateTime.now().plusDays(1))))
.httpClient(getAssertingHttpClient(interceptorManager.getPlaybackClient()));
} else if (getTestMode() == TestMode.RECORD) {
clientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getCredential());
} else if (getTestMode() == TestMode.LIVE) {
clientBuilder.credential(getCredential());
clientBuilder.endpoint(MonitorQueryTestUtils.getLogEndpoint());
}
this.client = clientBuilder
.buildClient();
}
private HttpClient getAssertingHttpClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.skipRequest((request, context) -> false)
.build();
}
private TokenCredential getCredential() {
return new DefaultAzureCredentialBuilder().build();
}
@Test
public void testLogsQuery() {
LogsQueryResult queryResults = client.queryWorkspace(workspaceId, QUERY_STRING,
new QueryTimeInterval(OffsetDateTime.of(LocalDateTime.of(2021, 01, 01, 0, 0), ZoneOffset.UTC),
OffsetDateTime.of(LocalDateTime.of(2021, 06, 10, 0, 0), ZoneOffset.UTC)));
assertEquals(1, queryResults.getAllTables().size());
assertEquals(1200, queryResults.getAllTables().get(0).getAllTableCells().size());
assertEquals(100, queryResults.getAllTables().get(0).getRows().size());
}
@Test
public void testLogsQueryResource() {
LogsQueryResult queryResults = client.queryResource(resourceId, QUERY_STRING,
new QueryTimeInterval(OffsetDateTime.of(LocalDateTime.of(2021, 01, 01, 0, 0), ZoneOffset.UTC),
OffsetDateTime.of(LocalDateTime.of(2021, 06, 10, 0, 0), ZoneOffset.UTC)));
assertEquals(1, queryResults.getAllTables().size());
assertEquals(1200, queryResults.getAllTables().get(0).getAllTableCells().size());
assertEquals(100, queryResults.getAllTables().get(0).getRows().size());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void testLogsQueryAllowPartialSuccess() {
final String query = "let dt = datatable (DateTime: datetime, Bool:bool, Guid: guid, Int: "
+ "int, Long:long, Double: double, String: string, Timespan: timespan, Decimal: decimal, Dynamic: dynamic)\n"
+ "[datetime(2015-12-31 23:59:59.9), false, guid(74be27de-1e4e-49d9-b579-fe0b331d3642), 12345, 1, 12345.6789,"
+ " 'string value', 10s, decimal(0.10101), dynamic({\"a\":123, \"b\":\"hello\", \"c\":[1,2,3], \"d\":{}})];"
+ "range x from 1 to 400000 step 1 | extend y=1 | join kind=fullouter dt on $left.y == $right.Long";
final LogsQueryOptions options = new LogsQueryOptions().setAllowPartialErrors(true);
final QueryTimeInterval interval = QueryTimeInterval.LAST_DAY;
final Response<LogsQueryResult> response = client.queryWorkspaceWithResponse(workspaceId, query, interval,
options, Context.NONE);
final LogsQueryResult result = response.getValue();
assertEquals(LogsQueryResultStatus.PARTIAL_FAILURE, result.getQueryResultStatus());
assertNotNull(result.getError());
assertNotNull(result.getTable());
assertTrue(result.getTable().getRows().size() > 0, "Expected there to be rows returned.");
}
@Test
public void testLogsQueryBatch() {
LogsBatchQuery logsBatchQuery = new LogsBatchQuery();
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take 2", null);
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + "| take 3", null);
LogsBatchQueryResultCollection batchResultCollection = client
.queryBatchWithResponse(logsBatchQuery, Context.NONE).getValue();
List<LogsBatchQueryResult> responses = batchResultCollection.getBatchResults();
assertEquals(2, responses.size());
assertEquals(1, responses.get(0).getAllTables().size());
assertEquals(24, responses.get(0).getAllTables().get(0).getAllTableCells().size());
assertEquals(2, responses.get(0).getAllTables().get(0).getRows().size());
assertEquals(1, responses.get(1).getAllTables().size());
assertEquals(36, responses.get(1).getAllTables().get(0).getAllTableCells().size());
assertEquals(3, responses.get(1).getAllTables().get(0).getRows().size());
}
@Test
public void testLogsQueryBatchWithServerTimeout() {
LogsQueryClientBuilder clientBuilder = new LogsQueryClientBuilder();
if (getTestMode() == TestMode.PLAYBACK) {
clientBuilder
.credential(request -> Mono.just(new AccessToken("fakeToken", OffsetDateTime.now().plusDays(1))))
.httpClient(getAssertingHttpClient(interceptorManager.getPlaybackClient()));
} else if (getTestMode() == TestMode.RECORD) {
clientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getCredential());
} else if (getTestMode() == TestMode.LIVE) {
clientBuilder.credential(getCredential());
clientBuilder.endpoint(MonitorQueryTestUtils.getLogEndpoint());
}
LogsQueryClient client = clientBuilder
.addPolicy((context, next) -> {
String requestBody = context.getHttpRequest().getBodyAsBinaryData().toString();
Assertions.assertTrue(requestBody.contains("wait=10"));
Assertions.assertTrue(requestBody.contains("wait=20"));
return next.process();
})
.buildClient();
LogsBatchQuery logsBatchQuery = new LogsBatchQuery();
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take 2", null);
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take 5", null,
new LogsQueryOptions().setServerTimeout(Duration.ofSeconds(20)));
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + "| take 3", null,
new LogsQueryOptions().setServerTimeout(Duration.ofSeconds(10)));
LogsBatchQueryResultCollection batchResultCollection = client
.queryBatchWithResponse(logsBatchQuery, Context.NONE).getValue();
List<LogsBatchQueryResult> responses = batchResultCollection.getBatchResults();
assertEquals(3, responses.size());
assertEquals(1, responses.get(0).getAllTables().size());
assertEquals(24, responses.get(0).getAllTables().get(0).getAllTableCells().size());
assertEquals(2, responses.get(0).getAllTables().get(0).getRows().size());
assertEquals(1, responses.get(1).getAllTables().size());
assertEquals(60, responses.get(1).getAllTables().get(0).getAllTableCells().size());
assertEquals(5, responses.get(1).getAllTables().get(0).getRows().size());
assertEquals(1, responses.get(2).getAllTables().size());
assertEquals(36, responses.get(2).getAllTables().get(0).getAllTableCells().size());
assertEquals(3, responses.get(2).getAllTables().get(0).getRows().size());
}
@Test
public void testMultipleWorkspaces() {
final String multipleWorkspacesQuery = "let dt = datatable (DateTime: datetime, Bool:bool, Guid: guid, Int: "
+ "int, Long:long, Double: double, String: string, Timespan: timespan, Decimal: decimal, Dynamic: dynamic, TenantId: string)\n"
+ "[datetime(2015-12-31 23:59:59.9), false, guid(74be27de-1e4e-49d9-b579-fe0b331d3642), 12345, 1, 12345.6789,"
+ " 'string value', 10s, decimal(0.10101), dynamic({\"a\":123, \"b\":\"hello\", \"c\":[1,2,3], \"d\":{}}), \"" + workspaceId + "\""
+ ", datetime(2015-12-31 23:59:59.9), false, guid(74be27de-1e4e-49d9-b579-fe0b331d3642), 12345, 1, 12345.6789,"
+ " 'string value', 10s, decimal(0.10101), dynamic({\"a\":123, \"b\":\"hello\", \"c\":[1,2,3], \"d\":{}}), \"" + additionalWorkspaceId + "\"];"
+ "range x from 1 to 2 step 1 | extend y=1 | join kind=fullouter dt on $left.y == $right.Long";
LogsQueryResult queryResults = client.queryWorkspaceWithResponse(workspaceId,
multipleWorkspacesQuery, null,
new LogsQueryOptions()
.setAdditionalWorkspaces(Collections.singletonList(additionalWorkspaceId)), Context.NONE)
.getValue();
assertEquals(1, queryResults.getAllTables().size());
assertEquals(2, queryResults
.getAllTables()
.get(0)
.getRows()
.stream()
.map(row -> row.getColumnValue("TenantId").get())
.map(LogsTableCell::getValueAsString)
.distinct()
.count());
}
@Test
public void testBatchQueryPartialSuccess() {
LogsBatchQuery logsBatchQuery = new LogsBatchQuery();
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take 2", null);
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take", null);
LogsBatchQueryResultCollection batchResultCollection = client
.queryBatchWithResponse(logsBatchQuery, Context.NONE).getValue();
List<LogsBatchQueryResult> responses = batchResultCollection.getBatchResults();
assertEquals(2, responses.size());
assertEquals(LogsQueryResultStatus.SUCCESS, responses.get(0).getQueryResultStatus());
assertNull(responses.get(0).getError());
assertEquals(LogsQueryResultStatus.FAILURE, responses.get(1).getQueryResultStatus());
assertNotNull(responses.get(1).getError());
assertEquals("BadArgumentError", responses.get(1).getError().getCode());
}
@Test
public void testStatistics() {
LogsQueryResult queryResults = client.queryWorkspaceWithResponse(workspaceId,
QUERY_STRING, null, new LogsQueryOptions().setIncludeStatistics(true), Context.NONE).getValue();
assertEquals(1, queryResults.getAllTables().size());
assertNotNull(queryResults.getStatistics());
}
@Test
public void testStatisticsResourceQuery() {
LogsQueryResult queryResults = client.queryResourceWithResponse(resourceId,
QUERY_STRING, null, new LogsQueryOptions().setIncludeStatistics(true), Context.NONE)
.getValue();
assertEquals(1, queryResults.getAllTables().size());
assertNotNull(queryResults.getStatistics());
}
@Test
@Disabled
public void testBatchStatistics() {
LogsBatchQuery logsBatchQuery = new LogsBatchQuery();
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING, null);
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING, null,
new LogsQueryOptions().setIncludeStatistics(true));
LogsBatchQueryResultCollection batchResultCollection = client
.queryBatchWithResponse(logsBatchQuery, Context.NONE).getValue();
List<LogsBatchQueryResult> responses = batchResultCollection.getBatchResults();
assertEquals(2, responses.size());
assertEquals(LogsQueryResultStatus.SUCCESS, responses.get(0).getQueryResultStatus());
assertNull(responses.get(0).getError());
assertNull(responses.get(0).getStatistics());
assertEquals(LogsQueryResultStatus.SUCCESS, responses.get(1).getQueryResultStatus());
assertNull(responses.get(1).getError());
assertNotNull(responses.get(1).getStatistics());
}
@Test
public void testServerTimeout() {
LogsQueryClientBuilder clientBuilder = new LogsQueryClientBuilder();
if (getTestMode() == TestMode.PLAYBACK) {
clientBuilder
.credential(request -> Mono.just(new AccessToken("fakeToken", OffsetDateTime.now().plusDays(1))))
.httpClient(getAssertingHttpClient(interceptorManager.getPlaybackClient()));
} else if (getTestMode() == TestMode.RECORD) {
clientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getCredential());
} else if (getTestMode() == TestMode.LIVE) {
clientBuilder.credential(getCredential());
clientBuilder.endpoint(MonitorQueryTestUtils.getLogEndpoint());
}
LogsQueryClient client = clientBuilder
.addPolicy((context, next) -> {
Assertions.assertTrue(context.getHttpRequest().getHeaders().get(HttpHeaderName.fromString("Prefer")).getValue().contains("wait=5"));
return next.process();
})
.buildClient();
long count = 5;
client.queryWorkspaceWithResponse(workspaceId, "range x from 1 to " + count + " step 1 | count", null,
new LogsQueryOptions().setServerTimeout(Duration.ofSeconds(5)), Context.NONE);
}
@Test
@Test
public void testVisualizationResourceQuery() {
String query = "datatable (s: string, i: long) [ \"a\", 1, \"b\", 2, \"c\", 3 ] "
+ "| render columnchart with (title=\"the chart title\", xtitle=\"the x axis title\")";
LogsQueryResult queryResults = client.queryResourceWithResponse(resourceId,
query, null, new LogsQueryOptions().setIncludeStatistics(true).setIncludeVisualization(true),
Context.NONE).getValue();
assertEquals(1, queryResults.getAllTables().size());
assertNotNull(queryResults.getVisualization());
BinaryData visualization = queryResults.getVisualization();
try ( JsonReader reader = JsonProviders.createReader(visualization.toStream());) {
Map<String, Object> map = reader.readMap(innerReader -> {
return reader.readUntyped();
});
String title = map.get("title").toString();
String xTitle = map.get("xTitle").toString();
assertEquals("the chart title", title);
assertEquals("the x axis title", xTitle);
} catch (IOException e) {
Assertions.fail("Failed to read the visualization data.");
}
LinkedHashMap<String, Object> linkedHashMap =
queryResults.getVisualization().toObject(new TypeReference<LinkedHashMap<String, Object>>() {
});
String title = linkedHashMap.get("title").toString();
String xTitle = linkedHashMap.get("xTitle").toString();
assertEquals("the chart title", title);
assertEquals("the x axis title", xTitle);
}
} | class LogsQueryClientTest extends TestProxyTestBase {
private LogsQueryClient client;
private String workspaceId;
private String additionalWorkspaceId;
private String resourceId;
@BeforeEach
public void setup() {
workspaceId = getLogWorkspaceId(interceptorManager.isPlaybackMode());
additionalWorkspaceId = getAdditionalLogWorkspaceId(interceptorManager.isPlaybackMode());
resourceId = getLogResourceId(interceptorManager.isPlaybackMode());
LogsQueryClientBuilder clientBuilder = new LogsQueryClientBuilder()
.retryPolicy(new RetryPolicy(new RetryStrategy() {
@Override
public int getMaxRetries() {
return 0;
}
@Override
public Duration calculateRetryDelay(int i) {
return null;
}
}));
if (getTestMode() == TestMode.PLAYBACK) {
clientBuilder
.credential(request -> Mono.just(new AccessToken("fakeToken", OffsetDateTime.now().plusDays(1))))
.httpClient(getAssertingHttpClient(interceptorManager.getPlaybackClient()));
} else if (getTestMode() == TestMode.RECORD) {
clientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getCredential());
} else if (getTestMode() == TestMode.LIVE) {
clientBuilder.credential(getCredential());
clientBuilder.endpoint(MonitorQueryTestUtils.getLogEndpoint());
}
this.client = clientBuilder
.buildClient();
}
private HttpClient getAssertingHttpClient(HttpClient httpClient) {
return new AssertingHttpClientBuilder(httpClient)
.assertSync()
.skipRequest((request, context) -> false)
.build();
}
private TokenCredential getCredential() {
return new DefaultAzureCredentialBuilder().build();
}
@Test
public void testLogsQuery() {
LogsQueryResult queryResults = client.queryWorkspace(workspaceId, QUERY_STRING,
new QueryTimeInterval(OffsetDateTime.of(LocalDateTime.of(2021, 01, 01, 0, 0), ZoneOffset.UTC),
OffsetDateTime.of(LocalDateTime.of(2021, 06, 10, 0, 0), ZoneOffset.UTC)));
assertEquals(1, queryResults.getAllTables().size());
assertEquals(1200, queryResults.getAllTables().get(0).getAllTableCells().size());
assertEquals(100, queryResults.getAllTables().get(0).getRows().size());
}
@Test
public void testLogsQueryResource() {
LogsQueryResult queryResults = client.queryResource(resourceId, QUERY_STRING,
new QueryTimeInterval(OffsetDateTime.of(LocalDateTime.of(2021, 01, 01, 0, 0), ZoneOffset.UTC),
OffsetDateTime.of(LocalDateTime.of(2021, 06, 10, 0, 0), ZoneOffset.UTC)));
assertEquals(1, queryResults.getAllTables().size());
assertEquals(1200, queryResults.getAllTables().get(0).getAllTableCells().size());
assertEquals(100, queryResults.getAllTables().get(0).getRows().size());
}
@Test
@DoNotRecord(skipInPlayback = true)
public void testLogsQueryAllowPartialSuccess() {
final String query = "let dt = datatable (DateTime: datetime, Bool:bool, Guid: guid, Int: "
+ "int, Long:long, Double: double, String: string, Timespan: timespan, Decimal: decimal, Dynamic: dynamic)\n"
+ "[datetime(2015-12-31 23:59:59.9), false, guid(74be27de-1e4e-49d9-b579-fe0b331d3642), 12345, 1, 12345.6789,"
+ " 'string value', 10s, decimal(0.10101), dynamic({\"a\":123, \"b\":\"hello\", \"c\":[1,2,3], \"d\":{}})];"
+ "range x from 1 to 400000 step 1 | extend y=1 | join kind=fullouter dt on $left.y == $right.Long";
final LogsQueryOptions options = new LogsQueryOptions().setAllowPartialErrors(true);
final QueryTimeInterval interval = QueryTimeInterval.LAST_DAY;
final Response<LogsQueryResult> response = client.queryWorkspaceWithResponse(workspaceId, query, interval,
options, Context.NONE);
final LogsQueryResult result = response.getValue();
assertEquals(LogsQueryResultStatus.PARTIAL_FAILURE, result.getQueryResultStatus());
assertNotNull(result.getError());
assertNotNull(result.getTable());
assertTrue(result.getTable().getRows().size() > 0, "Expected there to be rows returned.");
}
@Test
public void testLogsQueryBatch() {
LogsBatchQuery logsBatchQuery = new LogsBatchQuery();
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take 2", null);
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + "| take 3", null);
LogsBatchQueryResultCollection batchResultCollection = client
.queryBatchWithResponse(logsBatchQuery, Context.NONE).getValue();
List<LogsBatchQueryResult> responses = batchResultCollection.getBatchResults();
assertEquals(2, responses.size());
assertEquals(1, responses.get(0).getAllTables().size());
assertEquals(24, responses.get(0).getAllTables().get(0).getAllTableCells().size());
assertEquals(2, responses.get(0).getAllTables().get(0).getRows().size());
assertEquals(1, responses.get(1).getAllTables().size());
assertEquals(36, responses.get(1).getAllTables().get(0).getAllTableCells().size());
assertEquals(3, responses.get(1).getAllTables().get(0).getRows().size());
}
@Test
public void testLogsQueryBatchWithServerTimeout() {
LogsQueryClientBuilder clientBuilder = new LogsQueryClientBuilder();
if (getTestMode() == TestMode.PLAYBACK) {
clientBuilder
.credential(request -> Mono.just(new AccessToken("fakeToken", OffsetDateTime.now().plusDays(1))))
.httpClient(getAssertingHttpClient(interceptorManager.getPlaybackClient()));
} else if (getTestMode() == TestMode.RECORD) {
clientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getCredential());
} else if (getTestMode() == TestMode.LIVE) {
clientBuilder.credential(getCredential());
clientBuilder.endpoint(MonitorQueryTestUtils.getLogEndpoint());
}
LogsQueryClient client = clientBuilder
.addPolicy((context, next) -> {
String requestBody = context.getHttpRequest().getBodyAsBinaryData().toString();
Assertions.assertTrue(requestBody.contains("wait=10"));
Assertions.assertTrue(requestBody.contains("wait=20"));
return next.process();
})
.buildClient();
LogsBatchQuery logsBatchQuery = new LogsBatchQuery();
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take 2", null);
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take 5", null,
new LogsQueryOptions().setServerTimeout(Duration.ofSeconds(20)));
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + "| take 3", null,
new LogsQueryOptions().setServerTimeout(Duration.ofSeconds(10)));
LogsBatchQueryResultCollection batchResultCollection = client
.queryBatchWithResponse(logsBatchQuery, Context.NONE).getValue();
List<LogsBatchQueryResult> responses = batchResultCollection.getBatchResults();
assertEquals(3, responses.size());
assertEquals(1, responses.get(0).getAllTables().size());
assertEquals(24, responses.get(0).getAllTables().get(0).getAllTableCells().size());
assertEquals(2, responses.get(0).getAllTables().get(0).getRows().size());
assertEquals(1, responses.get(1).getAllTables().size());
assertEquals(60, responses.get(1).getAllTables().get(0).getAllTableCells().size());
assertEquals(5, responses.get(1).getAllTables().get(0).getRows().size());
assertEquals(1, responses.get(2).getAllTables().size());
assertEquals(36, responses.get(2).getAllTables().get(0).getAllTableCells().size());
assertEquals(3, responses.get(2).getAllTables().get(0).getRows().size());
}
@Test
public void testMultipleWorkspaces() {
final String multipleWorkspacesQuery = "let dt = datatable (DateTime: datetime, Bool:bool, Guid: guid, Int: "
+ "int, Long:long, Double: double, String: string, Timespan: timespan, Decimal: decimal, Dynamic: dynamic, TenantId: string)\n"
+ "[datetime(2015-12-31 23:59:59.9), false, guid(74be27de-1e4e-49d9-b579-fe0b331d3642), 12345, 1, 12345.6789,"
+ " 'string value', 10s, decimal(0.10101), dynamic({\"a\":123, \"b\":\"hello\", \"c\":[1,2,3], \"d\":{}}), \"" + workspaceId + "\""
+ ", datetime(2015-12-31 23:59:59.9), false, guid(74be27de-1e4e-49d9-b579-fe0b331d3642), 12345, 1, 12345.6789,"
+ " 'string value', 10s, decimal(0.10101), dynamic({\"a\":123, \"b\":\"hello\", \"c\":[1,2,3], \"d\":{}}), \"" + additionalWorkspaceId + "\"];"
+ "range x from 1 to 2 step 1 | extend y=1 | join kind=fullouter dt on $left.y == $right.Long";
LogsQueryResult queryResults = client.queryWorkspaceWithResponse(workspaceId,
multipleWorkspacesQuery, null,
new LogsQueryOptions()
.setAdditionalWorkspaces(Collections.singletonList(additionalWorkspaceId)), Context.NONE)
.getValue();
assertEquals(1, queryResults.getAllTables().size());
assertEquals(2, queryResults
.getAllTables()
.get(0)
.getRows()
.stream()
.map(row -> row.getColumnValue("TenantId").get())
.map(LogsTableCell::getValueAsString)
.distinct()
.count());
}
@Test
public void testBatchQueryPartialSuccess() {
LogsBatchQuery logsBatchQuery = new LogsBatchQuery();
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take 2", null);
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING + " | take", null);
LogsBatchQueryResultCollection batchResultCollection = client
.queryBatchWithResponse(logsBatchQuery, Context.NONE).getValue();
List<LogsBatchQueryResult> responses = batchResultCollection.getBatchResults();
assertEquals(2, responses.size());
assertEquals(LogsQueryResultStatus.SUCCESS, responses.get(0).getQueryResultStatus());
assertNull(responses.get(0).getError());
assertEquals(LogsQueryResultStatus.FAILURE, responses.get(1).getQueryResultStatus());
assertNotNull(responses.get(1).getError());
assertEquals("BadArgumentError", responses.get(1).getError().getCode());
}
@Test
public void testStatistics() {
LogsQueryResult queryResults = client.queryWorkspaceWithResponse(workspaceId,
QUERY_STRING, null, new LogsQueryOptions().setIncludeStatistics(true), Context.NONE).getValue();
BinaryData statisticsData = queryResults.getStatistics();
try (JsonReader jsonReader = JsonProviders.createReader(statisticsData.toBytes())) {
Map<String, Object> statisticsMap = jsonReader.readMap(JsonReader::readUntyped);
assertNotNull(statisticsMap);
Object query = statisticsMap.get("query");
if (query instanceof Map<?, ?>) {
Map<?, ?> queryMap = (Map<?, ?>) query;
assertNotNull(queryMap.get("executionTime"));
assertNotNull(queryMap.get("resourceUsage"));
} else {
Assertions.fail("Failed to read the statistics data.");
}
} catch (Exception e) {
Assertions.fail("Failed to read the statistics data.");
}
assertEquals(1, queryResults.getAllTables().size());
assertNotNull(queryResults.getStatistics());
}
@Test
public void testStatisticsResourceQuery() {
LogsQueryResult queryResults = client.queryResourceWithResponse(resourceId,
QUERY_STRING, null, new LogsQueryOptions().setIncludeStatistics(true), Context.NONE)
.getValue();
assertEquals(1, queryResults.getAllTables().size());
assertNotNull(queryResults.getStatistics());
}
@Test
@Disabled
public void testBatchStatistics() {
LogsBatchQuery logsBatchQuery = new LogsBatchQuery();
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING, null);
logsBatchQuery.addWorkspaceQuery(workspaceId, QUERY_STRING, null,
new LogsQueryOptions().setIncludeStatistics(true));
LogsBatchQueryResultCollection batchResultCollection = client
.queryBatchWithResponse(logsBatchQuery, Context.NONE).getValue();
List<LogsBatchQueryResult> responses = batchResultCollection.getBatchResults();
assertEquals(2, responses.size());
assertEquals(LogsQueryResultStatus.SUCCESS, responses.get(0).getQueryResultStatus());
assertNull(responses.get(0).getError());
assertNull(responses.get(0).getStatistics());
assertEquals(LogsQueryResultStatus.SUCCESS, responses.get(1).getQueryResultStatus());
assertNull(responses.get(1).getError());
assertNotNull(responses.get(1).getStatistics());
}
@Test
public void testServerTimeout() {
LogsQueryClientBuilder clientBuilder = new LogsQueryClientBuilder();
if (getTestMode() == TestMode.PLAYBACK) {
clientBuilder
.credential(request -> Mono.just(new AccessToken("fakeToken", OffsetDateTime.now().plusDays(1))))
.httpClient(getAssertingHttpClient(interceptorManager.getPlaybackClient()));
} else if (getTestMode() == TestMode.RECORD) {
clientBuilder
.addPolicy(interceptorManager.getRecordPolicy())
.credential(getCredential());
} else if (getTestMode() == TestMode.LIVE) {
clientBuilder.credential(getCredential());
clientBuilder.endpoint(MonitorQueryTestUtils.getLogEndpoint());
}
LogsQueryClient client = clientBuilder
.addPolicy((context, next) -> {
Assertions.assertTrue(context.getHttpRequest().getHeaders().get(HttpHeaderName.fromString("Prefer")).getValue().contains("wait=5"));
return next.process();
})
.buildClient();
long count = 5;
client.queryWorkspaceWithResponse(workspaceId, "range x from 1 to " + count + " step 1 | count", null,
new LogsQueryOptions().setServerTimeout(Duration.ofSeconds(5)), Context.NONE);
}
@Test
@Test
public void testVisualizationResourceQuery() {
String query = "datatable (s: string, i: long) [ \"a\", 1, \"b\", 2, \"c\", 3 ] "
+ "| render columnchart with (title=\"the chart title\", xtitle=\"the x axis title\")";
LogsQueryResult queryResults = client.queryResourceWithResponse(resourceId,
query, null, new LogsQueryOptions().setIncludeStatistics(true).setIncludeVisualization(true),
Context.NONE).getValue();
assertEquals(1, queryResults.getAllTables().size());
assertNotNull(queryResults.getVisualization());
BinaryData visualization = queryResults.getVisualization();
try (JsonReader reader = JsonProviders.createReader(visualization.toStream())) {
Map<String, Object> map = reader.readMap(innerReader -> {
return reader.readUntyped();
});
String title = map.get("title").toString();
String xTitle = map.get("xTitle").toString();
assertEquals("the chart title", title);
assertEquals("the x axis title", xTitle);
} catch (IOException e) {
Assertions.fail("Failed to read the visualization data.");
}
LinkedHashMap<String, Object> linkedHashMap =
queryResults.getVisualization().toObject(new TypeReference<LinkedHashMap<String, Object>>() {
});
String title = linkedHashMap.get("title").toString();
String xTitle = linkedHashMap.get("xTitle").toString();
assertEquals("the chart title", title);
assertEquals("the x axis title", xTitle);
}
} |
Should we do a read here to validate that the conatiner has valid config (like if you expect any certain PK + index policy)? | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | CosmosAsyncContainer metadataContainer = | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} |
That has been a major problem in throughput control (that we did not add validation) | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | CosmosAsyncContainer metadataContainer = | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} |
yea make sense - also one question I have here is that should we create the metadata container if it does not exists? I did not add it because it will complicated the story for AAD auth etc. Open for suggestions | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | CosmosAsyncContainer metadataContainer = | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} |
IMO better to provide a help function to create the container - I would never create it dynamically at runtime. | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | CosmosAsyncContainer metadataContainer = | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
return this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(containerProperties.getId())
.getFeedRanges()
.onErrorMap(throwable ->
KafkaCosmosExceptionsHelper.convertToConnectException(
throwable,
"GetFeedRanges failed for container " + containerProperties.getId()))
.block();
}
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
Map<String, String> topicMapFromConfig =
this.config.getContainersConfig().getContainersTopicMap()
.stream()
.map(containerTopicMapString -> containerTopicMapString.split("
.collect(
Collectors.toMap(
containerTopicMapArray -> containerTopicMapArray[1],
containerTopicMapArray -> containerTopicMapArray[0]));
Map<String, String> effectiveContainersTopicMap = new HashMap<>();
allContainers.forEach(containerProperties -> {
if (topicMapFromConfig.containsKey(containerProperties.getId())) {
effectiveContainersTopicMap.put(
containerProperties.getId(),
topicMapFromConfig.get(containerProperties.getId()));
} else {
effectiveContainersTopicMap.put(
containerProperties.getId(),
containerProperties.getId());
}
});
return effectiveContainersTopicMap;
}
@Override
public Config validate(Map<String, String> connectorConfigs) {
Config config = super.validate(connectorConfigs);
if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
return config;
}
Map<String, ConfigValue> configValues =
config
.configValues()
.stream()
.collect(Collectors.toMap(ConfigValue::name, Function.identity()));
validateCosmosAccountAuthConfig(configValues);
validateThroughputControlConfig(configValues);
return config;
}
@Override
public void close() {
this.stop();
}
} |
will do in next PR | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | CosmosAsyncContainer metadataContainer = | private IMetadataReader getMetadataReader() {
switch (this.config.getMetadataConfig().getStorageType()) {
case KAFKA:
return this.kafkaOffsetStorageReader;
case COSMOS:
CosmosAsyncContainer metadataContainer =
this.cosmosClient
.getDatabase(this.config.getContainersConfig().getDatabaseName())
.getContainer(this.config.getMetadataConfig().getStorageName());
metadataContainer.read()
.doOnNext(containerResponse -> {
PartitionKeyDefinition partitionKeyDefinition = containerResponse.getProperties().getPartitionKeyDefinition();
if (partitionKeyDefinition.getPaths().size() != 1 || !partitionKeyDefinition.getPaths().get(0).equals("/id")) {
throw new IllegalStateException("Cosmos Metadata container need to be partitioned by /id");
}
})
.block();
return new MetadataCosmosStorageManager(metadataContainer);
default:
throw new IllegalArgumentException("Metadata storage type " + this.config.getMetadataConfig().getStorageType() + " is not supported");
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
@Override
public void start(Map<String, String> props) {
LOGGER.info("Starting the kafka cosmos source connector");
this.config = new CosmosSourceConfig(props);
this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
this.metadataReader = this.getMetadataReader();
this.monitorThread = new MetadataMonitorThread(
this.config.getContainersConfig(),
this.config.getMetadataConfig(),
this.context(),
this.metadataReader,
this.cosmosClient
);
this.monitorThread.start();
}
@Override
public Class<? extends Task> taskClass() {
return CosmosSourceTask.class;
}
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
switch (taskUnits.getLeft().getStorageType()) {
case COSMOS:
updateMetadataRecordsInCosmos(taskUnits.getLeft());
break;
case KAFKA:
taskConfigs
.get(taskConfigs.size() - 1)
.putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
break;
default:
throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
}
return taskConfigs;
}
@Override
public void stop() {
LOGGER.info("Stopping Kafka CosmosDB source connector");
if (this.cosmosClient != null) {
LOGGER.debug("Closing cosmos client");
this.cosmosClient.close();
}
if (this.monitorThread != null) {
LOGGER.debug("Closing monitoring thread");
this.monitorThread.close();
}
}
@Override
public ConfigDef config() {
return CosmosSourceConfig.getConfigDef();
}
@Override
public String version() {
return KafkaCosmosConstants.CURRENT_VERSION;
}
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
if (metadataTaskUnit.getStorageType() != CosmosMetadataStorageType.COSMOS) {
throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
MetadataCosmosStorageManager cosmosProducer = (MetadataCosmosStorageManager) this.metadataReader;
cosmosProducer.createMetadataItems(metadataTaskUnit);
}
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
List<List<FeedRangeTaskUnit>> partitionedTaskUnits = new ArrayList<>();
if (taskUnits.size() <= maxTasks) {
partitionedTaskUnits.addAll(
taskUnits.stream().map(taskUnit -> Arrays.asList(taskUnit)).collect(Collectors.toList()));
} else {
for (int i = 0; i < maxTasks; i++) {
partitionedTaskUnits.add(new ArrayList<>());
}
for (int i = 0; i < taskUnits.size(); i++) {
partitionedTaskUnits.get(i % maxTasks).add(taskUnits.get(i));
}
}
List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
partitionedTaskUnits.forEach(feedRangeTaskUnits -> {
Map<String, String> taskConfigs = this.config.originalsStrings();
taskConfigs.putAll(
CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(feedRangeTaskUnits));
feedRangeTaskConfigs.add(taskConfigs);
});
return feedRangeTaskConfigs;
}
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
for (CosmosContainerProperties containerProperties : allContainers) {
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
this.getEffectiveFeedRangesContinuationMap(
this.config.getContainersConfig().getDatabaseName(),
containerProperties);
updatedContainerToFeedRangesMap.put(
containerProperties.getResourceId(),
effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
);
for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
allFeedRangeTaskUnits.add(
new FeedRangeTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
containerProperties.getId(),
containerProperties.getResourceId(),
effectiveFeedRange,
effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
containerTopicMap.get(containerProperties.getId())
)
);
}
}
MetadataTaskUnit metadataTaskUnit =
new MetadataTaskUnit(
this.config.getContainersConfig().getDatabaseName(),
allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
updatedContainerToFeedRangesMap,
this.config.getMetadataConfig().getStorageName(),
this.config.getMetadataConfig().getStorageType());
return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
String databaseName,
CosmosContainerProperties containerProperties) {
List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
this.metadataReader
.getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
.block().v;
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
Flux.fromIterable(containerFeedRanges)
.flatMap(containerFeedRange -> {
if (feedRangesMetadataTopicOffset == null) {
return Mono.just(
Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
} else {
return this.getEffectiveContinuationMapForSingleFeedRange(
databaseName,
containerProperties.getResourceId(),
containerFeedRange,
container,
feedRangesMetadataTopicOffset.getFeedRanges());
}
})
.doOnNext(map -> {
effectiveFeedRangesContinuationMap.putAll(map);
})
.blockLast();
return effectiveFeedRangesContinuationMap;
}
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
String databaseName,
String containerRid,
FeedRange containerFeedRange,
CosmosAsyncContainer cosmosAsyncContainer,
List<FeedRange> rangesFromMetadataTopicOffset) {
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
if (feedRangeContinuationTopicOffset != null) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
feedRangeContinuationTopicOffset,
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
return Flux.fromIterable(rangesFromMetadataTopicOffset)
.flatMap(rangeFromOffset -> {
return ImplementationBridgeHelpers
.CosmosAsyncContainerHelper
.getCosmosAsyncContainerAccessor()
.checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
.flatMap(overlapped -> {
if (overlapped) {
return Mono.just(rangeFromOffset);
} else {
return Mono.empty();
}
});
})
.collectList()
.flatMap(overlappedFeedRangesFromOffset -> {
if (overlappedFeedRangesFromOffset.size() == 1) {
effectiveContinuationMap.put(
containerFeedRange,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
containerFeedRange));
return Mono.just(effectiveContinuationMap);
}
if (overlappedFeedRangesFromOffset.size() > 1) {
for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
effectiveContinuationMap.put(
overlappedRangeFromOffset,
this.getContinuationStateFromOffset(
this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
overlappedRangeFromOffset));
}
return Mono.just(effectiveContinuationMap);
}
LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
});
}
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
FeedRange feedRange) {
KafkaCosmosChangeFeedState changeFeedState =
new KafkaCosmosChangeFeedState(
feedRangeContinuationTopicOffset.getResponseContinuation(),
feedRange,
feedRangeContinuationTopicOffset.getItemLsn());
return changeFeedState;
}
/**
 * Fetches the container's current feed ranges, blocking on the async call.
 * Any failure is converted into a Kafka Connect exception.
 */
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
    CosmosAsyncContainer container = this.cosmosClient
        .getDatabase(this.config.getContainersConfig().getDatabaseName())
        .getContainer(containerProperties.getId());

    return container
        .getFeedRanges()
        .onErrorMap(throwable ->
            KafkaCosmosExceptionsHelper.convertToConnectException(
                throwable,
                "GetFeedRanges failed for container " + containerProperties.getId()))
        .block();
}
/**
 * Builds the effective container-id -> topic mapping for all monitored containers:
 * explicit entries from the configured containers-topic map win, and any container
 * without an explicit mapping falls back to a topic named after the container id.
 */
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
    // NOTE(review): the delimiter literal in split(" below is truncated in this copy
    // of the source — confirm the original delimiter against the upstream file.
    // After the split, element [1] becomes the map key (container id) and element [0]
    // the value (topic name).
    Map<String, String> topicMapFromConfig =
        this.config.getContainersConfig().getContainersTopicMap()
            .stream()
            .map(containerTopicMapString -> containerTopicMapString.split("
            .collect(
                Collectors.toMap(
                    containerTopicMapArray -> containerTopicMapArray[1],
                    containerTopicMapArray -> containerTopicMapArray[0]));
    Map<String, String> effectiveContainersTopicMap = new HashMap<>();
    allContainers.forEach(containerProperties -> {
        if (topicMapFromConfig.containsKey(containerProperties.getId())) {
            // Explicit mapping from configuration.
            effectiveContainersTopicMap.put(
                containerProperties.getId(),
                topicMapFromConfig.get(containerProperties.getId()));
        } else {
            // Default: topic name equals the container id.
            effectiveContainersTopicMap.put(
                containerProperties.getId(),
                containerProperties.getId());
        }
    });
    return effectiveContainersTopicMap;
}
/**
 * Validates the connector configuration: standard ConfigDef validation first,
 * then connector-specific cross-field checks for account auth and throughput
 * control. The extra checks are skipped when basic validation already failed.
 */
@Override
public Config validate(Map<String, String> connectorConfigs) {
    Config validationResult = super.validate(connectorConfigs);

    boolean hasBasicErrors = validationResult
        .configValues()
        .stream()
        .anyMatch(value -> !value.errorMessages().isEmpty());
    if (hasBasicErrors) {
        return validationResult;
    }

    // Index the validated values by config name for the cross-field validators.
    Map<String, ConfigValue> valuesByName = validationResult
        .configValues()
        .stream()
        .collect(Collectors.toMap(ConfigValue::name, Function.identity()));
    validateCosmosAccountAuthConfig(valuesByName);
    validateThroughputControlConfig(valuesByName);
    return validationResult;
}
/** AutoCloseable hook; delegates all cleanup to {@link #stop()}. */
@Override
public void close() {
    this.stop();
}
} | class CosmosSourceConnector extends SourceConnector implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosSourceConnector.class);
private CosmosSourceConfig config;
private CosmosAsyncClient cosmosClient;
private MetadataMonitorThread monitorThread;
private MetadataKafkaStorageManager kafkaOffsetStorageReader;
private IMetadataReader metadataReader;
/**
 * Initializes the connector: parses the configuration, creates the Cosmos
 * client and metadata reader, and starts the background thread that monitors
 * container/feed-range metadata.
 *
 * @param props raw connector configuration supplied by the Connect framework.
 */
@Override
public void start(Map<String, String> props) {
    LOGGER.info("Starting the kafka cosmos source connector");
    this.config = new CosmosSourceConfig(props);
    this.cosmosClient = CosmosClientStore.getCosmosClient(this.config.getAccountConfig());
    // Wraps the Connect offset storage so previously committed offsets can be read back.
    this.kafkaOffsetStorageReader = new MetadataKafkaStorageManager(this.context().offsetStorageReader());
    this.metadataReader = this.getMetadataReader();
    this.monitorThread = new MetadataMonitorThread(
        this.config.getContainersConfig(),
        this.config.getMetadataConfig(),
        this.context(),
        this.metadataReader,
        this.cosmosClient
    );
    this.monitorThread.start();
}
/** The {@link Task} implementation this connector spawns. */
@Override
public Class<? extends Task> taskClass() {
    return CosmosSourceTask.class;
}
/**
 * Produces the per-task configuration maps. Feed-range task units are spread
 * across at most {@code maxTasks} configs; the metadata task unit is either
 * persisted to Cosmos directly (COSMOS storage) or attached to the last task's
 * config (KAFKA storage).
 */
@Override
public List<Map<String, String>> taskConfigs(int maxTasks) {
    Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> taskUnits = this.getAllTaskUnits();
    List<Map<String, String>> taskConfigs = this.getFeedRangeTaskConfigs(taskUnits.getRight(), maxTasks);
    switch (taskUnits.getLeft().getStorageType()) {
        case COSMOS:
            // Metadata is written by the connector itself, not by a task.
            updateMetadataRecordsInCosmos(taskUnits.getLeft());
            break;
        case KAFKA:
            // The last task additionally carries the metadata task unit.
            taskConfigs
                .get(taskConfigs.size() - 1)
                .putAll(CosmosSourceTaskConfig.getMetadataTaskUnitConfigMap(taskUnits.getLeft()));
            break;
        default:
            throw new IllegalArgumentException("StorageType " + taskUnits.getLeft().getStorageType() + " is not supported");
    }
    return taskConfigs;
}
/**
 * Stops the connector, releasing the metadata monitor thread and the Cosmos
 * client. The monitor thread is closed first: it holds a reference to the
 * Cosmos client (see {@code start}), so closing the client before the thread
 * could let the thread race against an already-closed client.
 */
@Override
public void stop() {
    LOGGER.info("Stopping Kafka CosmosDB source connector");
    if (this.monitorThread != null) {
        LOGGER.debug("Closing monitoring thread");
        this.monitorThread.close();
    }
    if (this.cosmosClient != null) {
        LOGGER.debug("Closing cosmos client");
        this.cosmosClient.close();
    }
}
/** Returns the connector's {@link ConfigDef}, used by Connect for validation. */
@Override
public ConfigDef config() {
    return CosmosSourceConfig.getConfigDef();
}
/** Returns the connector version reported to the Connect framework. */
@Override
public String version() {
    return KafkaCosmosConstants.CURRENT_VERSION;
}
/**
 * Persists the metadata task unit into the Cosmos metadata container.
 * Only valid when the connector is configured with COSMOS metadata storage.
 */
private void updateMetadataRecordsInCosmos(MetadataTaskUnit metadataTaskUnit) {
    if (metadataTaskUnit.getStorageType() == CosmosMetadataStorageType.COSMOS) {
        MetadataCosmosStorageManager storageManager =
            (MetadataCosmosStorageManager) this.metadataReader;
        storageManager.createMetadataItems(metadataTaskUnit);
        return;
    }
    throw new IllegalStateException("updateMetadataRecordsInCosmos should not be called when metadata storage type is not cosmos");
}
/**
 * Distributes the feed-range task units round-robin over at most
 * {@code maxTasks} task configs (never more configs than units), merging each
 * partition with the connector's own configuration.
 */
private List<Map<String, String>> getFeedRangeTaskConfigs(List<FeedRangeTaskUnit> taskUnits, int maxTasks) {
    // One task per unit when there are fewer units than maxTasks.
    int taskCount = Math.min(maxTasks, taskUnits.size());
    List<List<FeedRangeTaskUnit>> partitions = new ArrayList<>();
    for (int i = 0; i < taskCount; i++) {
        partitions.add(new ArrayList<>());
    }
    // Round-robin assignment keeps the partitions balanced.
    for (int i = 0; i < taskUnits.size(); i++) {
        partitions.get(i % taskCount).add(taskUnits.get(i));
    }

    List<Map<String, String>> feedRangeTaskConfigs = new ArrayList<>();
    for (List<FeedRangeTaskUnit> partition : partitions) {
        Map<String, String> taskConfig = this.config.originalsStrings();
        taskConfig.putAll(CosmosSourceTaskConfig.getFeedRangeTaskUnitsConfigMap(partition));
        feedRangeTaskConfigs.add(taskConfig);
    }
    return feedRangeTaskConfigs;
}
/**
 * Builds one metadata task unit plus one feed-range task unit per effective
 * feed range across all monitored containers.
 */
private Pair<MetadataTaskUnit, List<FeedRangeTaskUnit>> getAllTaskUnits() {
    List<CosmosContainerProperties> allContainers = this.monitorThread.getAllContainers().block();
    Map<String, String> containerTopicMap = this.getContainersTopicMap(allContainers);
    List<FeedRangeTaskUnit> allFeedRangeTaskUnits = new ArrayList<>();
    Map<String, List<FeedRange>> updatedContainerToFeedRangesMap = new ConcurrentHashMap<>();
    for (CosmosContainerProperties containerProperties : allContainers) {
        // Per container: which feed ranges to poll and the continuation state
        // (possibly null) each of them should resume from.
        Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap =
            this.getEffectiveFeedRangesContinuationMap(
                this.config.getContainersConfig().getDatabaseName(),
                containerProperties);
        // Keyed by resource id so recreated containers with the same name stay distinct.
        updatedContainerToFeedRangesMap.put(
            containerProperties.getResourceId(),
            effectiveFeedRangesContinuationMap.keySet().stream().collect(Collectors.toList())
        );
        for (FeedRange effectiveFeedRange : effectiveFeedRangesContinuationMap.keySet()) {
            allFeedRangeTaskUnits.add(
                new FeedRangeTaskUnit(
                    this.config.getContainersConfig().getDatabaseName(),
                    containerProperties.getId(),
                    containerProperties.getResourceId(),
                    effectiveFeedRange,
                    effectiveFeedRangesContinuationMap.get(effectiveFeedRange),
                    containerTopicMap.get(containerProperties.getId())
                )
            );
        }
    }
    MetadataTaskUnit metadataTaskUnit =
        new MetadataTaskUnit(
            this.config.getContainersConfig().getDatabaseName(),
            allContainers.stream().map(CosmosContainerProperties::getResourceId).collect(Collectors.toList()),
            updatedContainerToFeedRangesMap,
            this.config.getMetadataConfig().getStorageName(),
            this.config.getMetadataConfig().getStorageType());
    return Pair.of(metadataTaskUnit, allFeedRangeTaskUnits);
}
/**
 * Computes, for one container, the map of feed ranges to poll and the
 * continuation state each should resume from. When no feed-range metadata has
 * been committed yet, every current feed range starts fresh (null state).
 */
private Map<FeedRange, KafkaCosmosChangeFeedState> getEffectiveFeedRangesContinuationMap(
    String databaseName,
    CosmosContainerProperties containerProperties) {
    List<FeedRange> containerFeedRanges = this.getFeedRanges(containerProperties);
    // Last committed snapshot of this container's feed ranges, if any.
    // NOTE(review): block() is assumed to yield a non-null holder here ('.v'
    // unwraps it) — confirm getFeedRangesMetadataOffset never completes empty.
    FeedRangesMetadataTopicOffset feedRangesMetadataTopicOffset =
        this.metadataReader
            .getFeedRangesMetadataOffset(databaseName, containerProperties.getResourceId())
            .block().v;
    Map<FeedRange, KafkaCosmosChangeFeedState> effectiveFeedRangesContinuationMap = new LinkedHashMap<>();
    CosmosAsyncContainer container = this.cosmosClient.getDatabase(databaseName).getContainer(containerProperties.getId());
    Flux.fromIterable(containerFeedRanges)
        .flatMap(containerFeedRange -> {
            if (feedRangesMetadataTopicOffset == null) {
                // First run for this container: nothing to resume from.
                return Mono.just(
                    Collections.singletonMap(containerFeedRange, (KafkaCosmosChangeFeedState) null));
            } else {
                return this.getEffectiveContinuationMapForSingleFeedRange(
                    databaseName,
                    containerProperties.getResourceId(),
                    containerFeedRange,
                    container,
                    feedRangesMetadataTopicOffset.getFeedRanges());
            }
        })
        .doOnNext(map -> {
            effectiveFeedRangesContinuationMap.putAll(map);
        })
        .blockLast();
    return effectiveFeedRangesContinuationMap;
}
/**
 * Resolves the continuation state for one current feed range of a container.
 * If an offset was committed for exactly this range it is used directly;
 * otherwise the previously-tracked ranges that overlap it are located and
 * their offsets reused (presumably the layout changed via partition
 * split/merge since the offsets were written — confirm against the
 * change-feed design docs).
 */
private Mono<Map<FeedRange, KafkaCosmosChangeFeedState>> getEffectiveContinuationMapForSingleFeedRange(
    String databaseName,
    String containerRid,
    FeedRange containerFeedRange,
    CosmosAsyncContainer cosmosAsyncContainer,
    List<FeedRange> rangesFromMetadataTopicOffset) {
    // Fast path: an offset committed for exactly this feed range.
    FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset =
        this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, containerFeedRange);
    Map<FeedRange, KafkaCosmosChangeFeedState> effectiveContinuationMap = new LinkedHashMap<>();
    if (feedRangeContinuationTopicOffset != null) {
        effectiveContinuationMap.put(
            containerFeedRange,
            this.getContinuationStateFromOffset(
                feedRangeContinuationTopicOffset,
                containerFeedRange));
        return Mono.just(effectiveContinuationMap);
    }
    // Filter the old ranges down to those overlapping the current range.
    return Flux.fromIterable(rangesFromMetadataTopicOffset)
        .flatMap(rangeFromOffset -> {
            return ImplementationBridgeHelpers
                .CosmosAsyncContainerHelper
                .getCosmosAsyncContainerAccessor()
                .checkFeedRangeOverlapping(cosmosAsyncContainer, rangeFromOffset, containerFeedRange)
                .flatMap(overlapped -> {
                    if (overlapped) {
                        return Mono.just(rangeFromOffset);
                    } else {
                        return Mono.empty();
                    }
                });
        })
        .collectList()
        .flatMap(overlappedFeedRangesFromOffset -> {
            // Exactly one overlapping old range: resume the current range from
            // that single old offset.
            if (overlappedFeedRangesFromOffset.size() == 1) {
                effectiveContinuationMap.put(
                    containerFeedRange,
                    this.getContinuationStateFromOffset(
                        this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedFeedRangesFromOffset.get(0)),
                        containerFeedRange));
                return Mono.just(effectiveContinuationMap);
            }
            // Multiple overlapping old ranges: keep polling each old range with
            // its own offset rather than the current (wider) range.
            if (overlappedFeedRangesFromOffset.size() > 1) {
                for (FeedRange overlappedRangeFromOffset : overlappedFeedRangesFromOffset) {
                    effectiveContinuationMap.put(
                        overlappedRangeFromOffset,
                        this.getContinuationStateFromOffset(
                            this.kafkaOffsetStorageReader.getFeedRangeContinuationOffset(databaseName, containerRid, overlappedRangeFromOffset),
                            overlappedRangeFromOffset));
                }
                return Mono.just(effectiveContinuationMap);
            }
            // Zero overlaps should not happen for a valid metadata offset.
            LOGGER.error("Can not find overlapped ranges for feedRange {}", containerFeedRange);
            return Mono.error(new IllegalStateException("Can not find overlapped ranges for feedRange " + containerFeedRange));
        });
}
// Converts a committed feed-range continuation offset into the change-feed
// state (continuation token + item LSN) that polling should resume from.
private KafkaCosmosChangeFeedState getContinuationStateFromOffset(
    FeedRangeContinuationTopicOffset feedRangeContinuationTopicOffset,
    FeedRange feedRange) {
    return new KafkaCosmosChangeFeedState(
        feedRangeContinuationTopicOffset.getResponseContinuation(),
        feedRange,
        feedRangeContinuationTopicOffset.getItemLsn());
}
/**
 * Fetches the container's current feed ranges, blocking on the async call.
 * Failures are mapped to Kafka Connect exceptions.
 */
private List<FeedRange> getFeedRanges(CosmosContainerProperties containerProperties) {
    return this.cosmosClient
        .getDatabase(this.config.getContainersConfig().getDatabaseName())
        .getContainer(containerProperties.getId())
        .getFeedRanges()
        .onErrorMap(throwable ->
            KafkaCosmosExceptionsHelper.convertToConnectException(
                throwable,
                "GetFeedRanges failed for container " + containerProperties.getId()))
        .block();
}
/**
 * Builds the effective container-id -> topic mapping: configured entries win,
 * unmapped containers default to a topic named after the container id.
 */
private Map<String, String> getContainersTopicMap(List<CosmosContainerProperties> allContainers) {
    // NOTE(review): the delimiter literal in split(" below is truncated in this
    // copy of the source — confirm the original delimiter. After the split,
    // element [1] is the key (container id) and element [0] the value (topic).
    Map<String, String> topicMapFromConfig =
        this.config.getContainersConfig().getContainersTopicMap()
            .stream()
            .map(containerTopicMapString -> containerTopicMapString.split("
            .collect(
                Collectors.toMap(
                    containerTopicMapArray -> containerTopicMapArray[1],
                    containerTopicMapArray -> containerTopicMapArray[0]));
    Map<String, String> effectiveContainersTopicMap = new HashMap<>();
    allContainers.forEach(containerProperties -> {
        if (topicMapFromConfig.containsKey(containerProperties.getId())) {
            // Explicit mapping from configuration.
            effectiveContainersTopicMap.put(
                containerProperties.getId(),
                topicMapFromConfig.get(containerProperties.getId()));
        } else {
            // Default: topic name equals the container id.
            effectiveContainersTopicMap.put(
                containerProperties.getId(),
                containerProperties.getId());
        }
    });
    return effectiveContainersTopicMap;
}
/**
 * Validates the connector configuration: standard ConfigDef validation first,
 * then cross-field checks for account auth and throughput control. The extra
 * checks are skipped when basic validation already produced errors.
 */
@Override
public Config validate(Map<String, String> connectorConfigs) {
    Config config = super.validate(connectorConfigs);
    if (config.configValues().stream().anyMatch(cv -> !cv.errorMessages().isEmpty())) {
        return config;
    }
    // Index the validated values by config name for the cross-field validators.
    Map<String, ConfigValue> configValues =
        config
            .configValues()
            .stream()
            .collect(Collectors.toMap(ConfigValue::name, Function.identity()));
    validateCosmosAccountAuthConfig(configValues);
    validateThroughputControlConfig(configValues);
    return config;
}
/** AutoCloseable hook; delegates cleanup to {@link #stop()}. */
@Override
public void close() {
    this.stop();
}
} |
Add null check for httpMethod too. | public HttpRequest(HttpMethod httpMethod, String url) {
this.httpMethod = httpMethod;
setUrl(url);
this.headers = new HttpHeaders();
this.options = new RequestOptions();
} | this.httpMethod = httpMethod; | public HttpRequest(HttpMethod httpMethod, String url) {
this.httpMethod = httpMethod;
setUrl(url);
this.headers = new HttpHeaders();
this.requestOptions = new RequestOptions();
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
    // Expose the private setRetryCount mutator to azure-core internals without
    // widening its visibility.
    HttpRequestAccessHelper.setAccessor(HttpRequest::setRetryCount);
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions options;
private int retryCount;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, URL url) {
    // Every request starts with empty headers and default options; callers
    // customize both through the fluent setters.
    this.headers = new HttpHeaders();
    this.options = new RequestOptions();
    this.httpMethod = httpMethod;
    this.url = url;
}
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
 * Set the request {@link HttpMethod}.
 *
 * @param httpMethod The request {@link HttpMethod}.
 *
 * @return The updated {@link HttpRequest}.
 *
 * @throws NullPointerException if {@code httpMethod} is {@code null}.
 */
public HttpRequest setHttpMethod(HttpMethod httpMethod) {
    // Fail fast on null, consistent with the null-hostile setUrl/setOptions setters.
    Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
    this.httpMethod = httpMethod;
    return this;
}
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
Objects.requireNonNull(url, "'url' cannot be null");
this.url = url;
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
    Objects.requireNonNull(url, "'url' cannot be null");
    try {
        // The 'if (url != null)' guard that used to wrap this assignment was dead
        // code: requireNonNull above already rejects null urls.
        this.url = new URL(url);
    } catch (MalformedURLException ex) {
        throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
    }
    return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
    this.body = content;
    // Only bodies with a known length get an explicit Content-Length header;
    // unknown-length content must rely on chunked transfer or a caller-set header.
    if (content != null && content.getLength() != null) {
        headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
    }
    return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getOptions() {
return options;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param options The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setOptions(RequestOptions options) {
Objects.requireNonNull(options, "'options' cannot be null");
this.options = options;
return this;
}
/**
* Creates a copy of this {@link HttpRequest}.
*
* <p>The main purpose of this is so that this {@link HttpRequest} can be changed and the resulting
* {@link HttpRequest} can be a backup. This means that the cloned {@link HttpHeaders} and body must not be able to
* change from side effects of this {@link HttpRequest}.</p>
*
* @return A new {@link HttpRequest} instance with cloned instances of all mutable properties.
*/
public HttpRequest copy() {
    // Copy the mutable pieces (headers, options) so later mutations of this
    // request do not leak into the copy; the body reference is shared.
    return new HttpRequest(httpMethod, url)
        .setHeaders(new HttpHeaders(headers))
        .setBody(body)
        .setOptions(options.copy())
        .setRetryCount(retryCount);
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
public int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
HttpRequestAccessHelper.setAccessor(new HttpRequestAccessHelper.HttpRequestAccessor() {
@Override
public int getRetryCount(HttpRequest httpRequest) {
return httpRequest.getRetryCount();
}
@Override
public HttpRequest setRetryCount(HttpRequest httpRequest, int retryCount) {
return httpRequest.setRetryCount(retryCount);
}
});
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions requestOptions;
private int retryCount;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = httpMethod;
this.url = url;
this.headers = new HttpHeaders();
this.requestOptions = new RequestOptions();
}
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Set the request {@link HttpMethod}.
*
* @param httpMethod The request {@link HttpMethod}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code httpMethod} is {@code null}.
*/
public HttpRequest setHttpMethod(HttpMethod httpMethod) {
Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
this.httpMethod = httpMethod;
return this;
}
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
Objects.requireNonNull(url, "'url' cannot be null");
this.url = url;
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
Objects.requireNonNull(url, "'url' cannot be null");
try {
this.url = new URL(url);
} catch (MalformedURLException ex) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
}
return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getRequestOptions() {
return requestOptions;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param requestOptions The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setRequestOptions(RequestOptions requestOptions) {
Objects.requireNonNull(requestOptions, "'requestOptions' cannot be null");
this.requestOptions = requestOptions;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
private int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
} |
This can eventually be RequestOptions.NONE? Maybe add this now, to save updating later. | public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = httpMethod;
this.url = url;
this.headers = new HttpHeaders();
this.options = new RequestOptions();
} | this.options = new RequestOptions(); | public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = httpMethod;
this.url = url;
this.headers = new HttpHeaders();
this.requestOptions = new RequestOptions();
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
    // Expose the private setRetryCount mutator to azure-core internals without
    // widening its visibility.
    HttpRequestAccessHelper.setAccessor(HttpRequest::setRetryCount);
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions options;
private int retryCount;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*/
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, String url) {
    // Fresh headers and default options for every new request; setUrl(String)
    // validates and parses the url before assigning it.
    this.headers = new HttpHeaders();
    this.options = new RequestOptions();
    this.httpMethod = httpMethod;
    setUrl(url);
}
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
 * Set the request {@link HttpMethod}.
 *
 * @param httpMethod The request {@link HttpMethod}.
 *
 * @return The updated {@link HttpRequest}.
 *
 * @throws NullPointerException if {@code httpMethod} is {@code null}.
 */
public HttpRequest setHttpMethod(HttpMethod httpMethod) {
    // Fail fast on null, consistent with the null-hostile setUrl/setOptions setters.
    Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
    this.httpMethod = httpMethod;
    return this;
}
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
Objects.requireNonNull(url, "'url' cannot be null");
this.url = url;
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
@SuppressWarnings("deprecation") // suppresses the URL(String) constructor's deprecation warning.
public HttpRequest setUrl(String url) {
    Objects.requireNonNull(url, "'url' cannot be null");
    try {
        this.url = new URL(url);
    } catch (MalformedURLException ex) {
        // Surface parse failures as IllegalArgumentException, logged through the client logger.
        throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
    }
    return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getOptions() {
return options;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param options The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setOptions(RequestOptions options) {
Objects.requireNonNull(options, "'options' cannot be null");
this.options = options;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
public int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
    // Bridge that lets azure-core internals read and update the private retry
    // count without widening the accessors' visibility.
    HttpRequestAccessHelper.setAccessor(new HttpRequestAccessHelper.HttpRequestAccessor() {
        @Override
        public int getRetryCount(HttpRequest httpRequest) {
            return httpRequest.getRetryCount();
        }
        @Override
        public HttpRequest setRetryCount(HttpRequest httpRequest, int retryCount) {
            return httpRequest.setRetryCount(retryCount);
        }
    });
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions requestOptions;
private int retryCount;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*/
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, String url) {
this.httpMethod = httpMethod;
setUrl(url);
this.headers = new HttpHeaders();
this.requestOptions = new RequestOptions();
}
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Set the request {@link HttpMethod}.
*
* @param httpMethod The request {@link HttpMethod}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code httpMethod} is {@code null}.
*/
public HttpRequest setHttpMethod(HttpMethod httpMethod) {
Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
this.httpMethod = httpMethod;
return this;
}
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
Objects.requireNonNull(url, "'url' cannot be null");
this.url = url;
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
Objects.requireNonNull(url, "'url' cannot be null");
try {
this.url = new URL(url);
} catch (MalformedURLException ex) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
}
return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getRequestOptions() {
return requestOptions;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param requestOptions The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setRequestOptions(RequestOptions requestOptions) {
Objects.requireNonNull(requestOptions, "'requestOptions' cannot be null");
this.requestOptions = requestOptions;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
private int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
} |
Same here | public HttpRequest(HttpMethod httpMethod, String url) {
this.httpMethod = httpMethod;
setUrl(url);
this.headers = new HttpHeaders();
this.options = new RequestOptions();
} | this.options = new RequestOptions(); | public HttpRequest(HttpMethod httpMethod, String url) {
this.httpMethod = httpMethod;
setUrl(url);
this.headers = new HttpHeaders();
this.requestOptions = new RequestOptions();
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
HttpRequestAccessHelper.setAccessor(HttpRequest::setRetryCount);
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions options;
private int retryCount;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = httpMethod;
this.url = url;
this.headers = new HttpHeaders();
this.options = new RequestOptions();
}
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Set the request {@link HttpMethod}.
*
* @param httpMethod The request {@link HttpMethod}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHttpMethod(HttpMethod httpMethod) {
this.httpMethod = httpMethod;
return this;
}
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
Objects.requireNonNull(url, "'url' cannot be null");
this.url = url;
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
Objects.requireNonNull(url, "'url' cannot be null");
try {
this.url = new URL(url);
} catch (MalformedURLException ex) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
}
return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getOptions() {
return options;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param options The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setOptions(RequestOptions options) {
Objects.requireNonNull(options, "'options' cannot be null");
this.options = options;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
public int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
HttpRequestAccessHelper.setAccessor(new HttpRequestAccessHelper.HttpRequestAccessor() {
@Override
public int getRetryCount(HttpRequest httpRequest) {
return httpRequest.getRetryCount();
}
@Override
public HttpRequest setRetryCount(HttpRequest httpRequest, int retryCount) {
return httpRequest.setRetryCount(retryCount);
}
});
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions requestOptions;
private int retryCount;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = httpMethod;
this.url = url;
this.headers = new HttpHeaders();
this.requestOptions = new RequestOptions();
}
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Set the request {@link HttpMethod}.
*
* @param httpMethod The request {@link HttpMethod}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code httpMethod} is {@code null}.
*/
public HttpRequest setHttpMethod(HttpMethod httpMethod) {
Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
this.httpMethod = httpMethod;
return this;
}
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
Objects.requireNonNull(url, "'url' cannot be null");
this.url = url;
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
Objects.requireNonNull(url, "'url' cannot be null");
try {
this.url = new URL(url);
} catch (MalformedURLException ex) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
}
return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getRequestOptions() {
return requestOptions;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param requestOptions The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setRequestOptions(RequestOptions requestOptions) {
Objects.requireNonNull(requestOptions, "'requestOptions' cannot be null");
this.requestOptions = requestOptions;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
private int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
} |
I think there is a typo here, `TO` should probably read `To` | public static void main(String[] args) {
String azureOpenaiKey = Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY");
String endpoint = Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT");
String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";
String fileName = "batman.wav";
Path filePath = Paths.get("src/samples/java/com/azure/ai/openai/resources/" + fileName);
OpenAIClient client = new OpenAIClientBuilder()
.endpoint(endpoint)
.credential(new AzureKeyCredential(azureOpenaiKey))
.buildClient();
byte[] file = BinaryData.fromFile(filePath).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file)
.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentOrModelId, fileName, transcriptionOptions);
System.out.println("Transcription: " + transcription.getText());
} | public static void main(String[] args) {
String azureOpenaiKey = Configuration.getGlobalConfiguration().get("AZURE_OPENAI_KEY");
String endpoint = Configuration.getGlobalConfiguration().get("AZURE_OPENAI_ENDPOINT");
String deploymentOrModelId = "{azure-open-ai-deployment-model-id}";
String fileName = "batman.wav";
Path filePath = Paths.get("src/samples/java/com/azure/ai/openai/resources/" + fileName);
OpenAIClient client = new OpenAIClientBuilder()
.endpoint(endpoint)
.credential(new AzureKeyCredential(azureOpenaiKey))
.buildClient();
byte[] file = BinaryData.fromFile(filePath).toBytes();
AudioTranscriptionOptions transcriptionOptions = new AudioTranscriptionOptions(file)
.setResponseFormat(AudioTranscriptionFormat.JSON);
AudioTranscription transcription = client.getAudioTranscription(deploymentOrModelId, fileName, transcriptionOptions);
System.out.println("Transcription: " + transcription.getText());
} | class AudioTranscriptionSample {
/**
* Runs the sample algorithm and demonstrates how to get the images for a given prompt.
*
* @param args Unused. Arguments to the program.
*/
} | class AudioTranscriptionSample {
/**
* Runs the sample algorithm and demonstrates how to get the images for a given prompt.
*
* @param args Unused. Arguments to the program.
*/
} | |
Got two more confirmed. Currently there's some issue with FunctionAppTest for ACA, reported to service team. Already confirmed with Vinay that the related resources has been deleted. Guess it's fine to update recordings afterwards. | private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
} | new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), | private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
} | class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
} | class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
} |
No. `RequestOptions` is always created with an instance of an empty callback and users cannot set their own callback. We add things to it by using the `Consumer.andThen()` method in our own `addHeader()`, `setBody()`, etc. | public Object invoke(Object proxy, SwaggerMethodParser methodParser, HttpRequest request) {
if (request.getRequestOptions() != null) {
request.getRequestOptions().getRequestCallback().accept(request);
}
if (request.getBody() != null) {
request.setBody(RestProxyUtils.validateLength(request));
}
final Response<?> response = send(request);
return handleRestReturnType(response, methodParser, methodParser.getReturnType());
} | request.getRequestOptions().getRequestCallback().accept(request); | public Object invoke(Object proxy, SwaggerMethodParser methodParser, HttpRequest request) {
if (request.getRequestOptions() != null) {
request.getRequestOptions().getRequestCallback().accept(request);
}
if (request.getBody() != null) {
request.setBody(RestProxyUtils.validateLength(request));
}
final Response<?> response = send(request);
return handleRestReturnType(response, methodParser, methodParser.getReturnType());
} | class RestProxyImpl extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline The HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer The serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser The parser that contains information about the interface describing REST API methods
* to be used.
*/
public RestProxyImpl(HttpPipeline httpPipeline, ObjectSerializer serializer,
SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send.
*
* @return A {@link Response}.
*/
Response<?> send(HttpRequest request) {
return httpPipeline.send(request);
}
@SuppressWarnings({"try", "unused"})
@Override
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* <p>'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.</p>
*
* @param response The Response to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
*
* @return The decodedResponse.
*/
private Response<?> ensureExpectedStatus(Response<?> response, SwaggerMethodParser methodParser) {
int responseStatusCode = response.getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)) {
return response;
}
BinaryData responseData = response.getBody();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
throw instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
response, null, null);
} else {
throw instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), response,
responseBytes, decodeByteArray(response.getBody().toBytes(), response, serializer, methodParser));
}
}
private Object handleRestResponseReturnType(Response<?> response, SwaggerMethodParser methodParser,
Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
try {
response.close();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return createResponseIfNecessary(response, entityType, null);
} else {
ResponseBodyMode responseBodyMode = null;
RequestOptions requestOptions = response.getRequest().getRequestOptions();
if (requestOptions != null) {
responseBodyMode = requestOptions.getResponseBodyMode();
}
if (responseBodyMode == DESERIALIZE) {
HttpResponseAccessHelper.setValue((HttpResponse<?>) response,
handleResponseBody(response, methodParser, bodyType, response.getBody()));
} else {
HttpResponseAccessHelper.setBodyDeserializer((HttpResponse<?>) response, (body) ->
handleResponseBody(response, methodParser, bodyType, body));
}
Response<?> responseToReturn = createResponseIfNecessary(response, entityType, response.getBody());
if (responseToReturn == null) {
return createResponseIfNecessary(response, entityType, null);
}
return responseToReturn;
}
} else {
return handleResponseBody(response, methodParser, entityType, response.getBody());
}
}
private Object handleResponseBody(Response<?> response, SwaggerMethodParser methodParser, Type entityType,
BinaryData responseBody) {
final int responseStatusCode = response.getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE)
|| TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
result = (responseStatusCode / 100) == 2;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
byte[] responseBodyBytes = responseBody != null ? responseBody.toBytes() : null;
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes != null ? (responseBodyBytes.length == 0 ? null : responseBodyBytes) : null;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, InputStream.class)) {
result = responseBody.toStream();
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = responseBody;
} else {
result = decodeByteArray(responseBody.toBytes(), response, serializer, methodParser);
}
return result;
}
/**
* Handle the provided HTTP response and return the deserialized value.
*
* @param response The HTTP response to the original HTTP request.
* @param methodParser The SwaggerMethodParser that the request originates from.
* @param returnType The type of value that will be returned.
*
* @return The deserialized result.
*/
private Object handleRestReturnType(Response<?> response, SwaggerMethodParser methodParser, Type returnType) {
final Response<?> expectedResponse = ensureExpectedStatus(response, methodParser);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) {
try {
expectedResponse.close();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
result = null;
} else {
result = handleRestResponseReturnType(response, methodParser, returnType);
}
return result;
}
/**
 * Serializes the configured body content (if any) onto the outgoing HTTP request.
 * Dispatch order: JsonSerializable types, then JSON bodies via the provided serializer,
 * then raw byte[]/String/ByteBuffer pass-throughs, then serializer fallback.
 *
 * @param requestDataConfiguration carries the request, the body object, and the JSON flag.
 * @param serializerAdapter serializer used for JSON and fallback object bodies.
 */
public void updateRequest(RequestDataConfiguration requestDataConfiguration, ObjectSerializer serializerAdapter) {
    boolean isJson = requestDataConfiguration.isJson();
    HttpRequest request = requestDataConfiguration.getHttpRequest();
    Object content = requestDataConfiguration.getBodyContent();
    // No body configured: leave the request untouched.
    if (content == null) {
        return;
    }
    // Types implementing JsonSerializable know how to write themselves.
    if (supportsJsonSerializable(content.getClass())) {
        request.setBody(BinaryData.fromObject(content));
        return;
    }
    if (isJson) {
        request.setBody(BinaryData.fromObject(content, serializerAdapter));
    } else if (content instanceof byte[]) {
        request.setBody(BinaryData.fromBytes((byte[]) content));
    } else if (content instanceof String) {
        request.setBody(BinaryData.fromString((String) content));
    } else if (content instanceof ByteBuffer) {
        ByteBuffer buffer = (ByteBuffer) content;
        if (buffer.hasArray()) {
            // Backing array available: hand it over without copying.
            request.setBody(BinaryData.fromBytes(buffer.array()));
        } else {
            // Direct/read-only buffer: copy out the remaining bytes.
            byte[] copied = new byte[buffer.remaining()];
            buffer.get(copied);
            request.setBody(BinaryData.fromBytes(copied));
        }
    } else {
        // Non-JSON POJO: still serialize through the adapter.
        request.setBody(BinaryData.fromObject(content, serializerAdapter));
    }
}
} | class RestProxyImpl extends RestProxyBase {
/**
 * Creates a RestProxy backed by the given pipeline and serializer.
 *
 * @param httpPipeline The HttpPipeline (policies plus HttpClient) used to send HTTP requests.
 * @param serializer The serializer that will be used to convert response bodies to POJOs.
 * @param interfaceParser The parser that contains information about the interface describing REST API methods
 * to be used.
 */
public RestProxyImpl(HttpPipeline httpPipeline, ObjectSerializer serializer,
SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
 * Sends the provided request synchronously through the configured pipeline,
 * applying every request policy registered on the HttpClient instance.
 *
 * @param request the HTTP request to send.
 *
 * @return A {@link Response}.
 */
Response<?> send(HttpRequest request) {
return httpPipeline.send(request);
}
@SuppressWarnings({"try", "unused"})
@Override
/**
 * Returns {@code response} unchanged when its status code is one the service method expects;
 * otherwise throws the unexpected-status exception type configured for that status code.
 *
 * <p>An 'expected status code' is one defined in the provided SwaggerMethodParser or listed in its
 * additional allowed status codes. (This sync variant throws rather than emitting an error signal.)</p>
 *
 * @param response The Response to check.
 * @param methodParser The method parser that contains information about the service interface method that initiated
 * the HTTP request.
 *
 * @return The validated response.
 */
private Response<?> ensureExpectedStatus(Response<?> response, SwaggerMethodParser methodParser) {
int responseStatusCode = response.getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)) {
return response;
}
// Unexpected status: read the error body once; an empty payload still produces a typed
// exception, just without a decoded error value attached.
BinaryData responseData = response.getBody();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
throw instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
response, null, null);
} else {
throw instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), response,
responseBytes, decodeByteArray(response.getBody().toBytes(), response, serializer, methodParser));
}
}
// Converts a raw Response into the method's declared Response<T> (or plain body) shape.
// For Response<Void> the connection is closed immediately; otherwise the body is either
// deserialized eagerly (DESERIALIZE mode) or a lazy deserializer is attached.
private Object handleRestResponseReturnType(Response<?> response, SwaggerMethodParser methodParser,
Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
// No body expected: release the connection before wrapping.
try {
response.close();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return createResponseIfNecessary(response, entityType, null);
} else {
// Response body mode comes from per-request options when present.
ResponseBodyMode responseBodyMode = null;
RequestOptions requestOptions = response.getRequest().getRequestOptions();
if (requestOptions != null) {
responseBodyMode = requestOptions.getResponseBodyMode();
}
if (responseBodyMode == DESERIALIZE) {
// Eager: decode now and store the value on the response.
HttpResponseAccessHelper.setValue((HttpResponse<?>) response,
handleResponseBody(response, methodParser, bodyType, response.getBody()));
} else {
// Lazy: decode only when the caller asks for the body.
HttpResponseAccessHelper.setBodyDeserializer((HttpResponse<?>) response, (body) ->
handleResponseBody(response, methodParser, bodyType, body));
}
Response<?> responseToReturn = createResponseIfNecessary(response, entityType, response.getBody());
// NOTE(review): on a null wrapper this retries with a null body — presumably a fallback
// for custom Response subtypes without a body constructor; verify against createResponseIfNecessary.
if (responseToReturn == null) {
return createResponseIfNecessary(response, entityType, null);
}
return responseToReturn;
}
} else {
// Method returns the body directly rather than a Response wrapper.
return handleResponseBody(response, methodParser, entityType, response.getBody());
}
}
// Decodes the response payload into the entity type declared by the service method.
// HEAD + boolean maps to "was the status 2xx"; byte[]/InputStream/BinaryData pass through
// (with optional Base64Url unwrapping for byte[]); everything else goes through the serializer.
private Object handleResponseBody(Response<?> response, SwaggerMethodParser methodParser, Type entityType,
BinaryData responseBody) {
final int responseStatusCode = response.getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE)
|| TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
result = (responseStatusCode / 100) == 2;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
byte[] responseBodyBytes = responseBody != null ? responseBody.toBytes() : null;
if (returnValueWireType == Base64Url.class) {
// NOTE(review): reached with responseBodyBytes == null this hands null to Base64Url — confirm it tolerates null.
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
// Empty payloads normalize to null rather than a zero-length array.
result = responseBodyBytes != null ? (responseBodyBytes.length == 0 ? null : responseBodyBytes) : null;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, InputStream.class)) {
// NOTE(review): unlike the byte[] branch, responseBody is dereferenced without a null check here
// and in the serializer fallback below — presumably never null on these paths; verify.
result = responseBody.toStream();
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = responseBody;
} else {
result = decodeByteArray(responseBody.toBytes(), response, serializer, methodParser);
}
return result;
}
/**
 * Handle the provided HTTP response and return the deserialized value.
 *
 * @param response The HTTP response to the original HTTP request.
 * @param methodParser The SwaggerMethodParser that the request originates from.
 * @param returnType The type of value that will be returned.
 *
 * @return The deserialized result, or null for void-returning methods.
 */
private Object handleRestReturnType(Response<?> response, SwaggerMethodParser methodParser, Type returnType) {
// Throws first if the status code is not one the method expects.
final Response<?> expectedResponse = ensureExpectedStatus(response, methodParser);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) {
// Void return: nothing to deserialize, so release the connection eagerly.
try {
expectedResponse.close();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
result = null;
} else {
result = handleRestResponseReturnType(response, methodParser, returnType);
}
return result;
}
// Serializes the configured body content (if any) onto the outgoing HTTP request.
// Dispatch order: JsonSerializable types first, then JSON via the serializer, then
// raw byte[]/String/ByteBuffer pass-throughs, then serializer fallback for other POJOs.
public void updateRequest(RequestDataConfiguration requestDataConfiguration, ObjectSerializer serializerAdapter) {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (bodyContentObject == null) {
return;
}
// Types implementing JsonSerializable know how to write themselves.
if (supportsJsonSerializable(bodyContentObject.getClass())) {
request.setBody(BinaryData.fromObject(bodyContentObject));
return;
}
if (isJson) {
request.setBody(BinaryData.fromObject(bodyContentObject, serializerAdapter));
} else if (bodyContentObject instanceof byte[]) {
request.setBody(BinaryData.fromBytes((byte[]) bodyContentObject));
} else if (bodyContentObject instanceof String) {
request.setBody(BinaryData.fromString((String) bodyContentObject));
} else if (bodyContentObject instanceof ByteBuffer) {
if (((ByteBuffer) bodyContentObject).hasArray()) {
// Heap buffer: reuse the backing array without copying.
request.setBody(BinaryData.fromBytes(((ByteBuffer) bodyContentObject).array()));
} else {
// Direct/read-only buffer: copy out the remaining bytes.
byte[] array = new byte[((ByteBuffer) bodyContentObject).remaining()];
((ByteBuffer) bodyContentObject).get(array);
request.setBody(BinaryData.fromBytes(array));
}
} else {
request.setBody(BinaryData.fromObject(bodyContentObject, serializerAdapter));
}
}
} |
Manually updated session records. Also deleted two test records whose test is marked `@Disabled`. | private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
} | new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY), | private void addSanitizers() {
List<TestProxySanitizer> sanitizers = new ArrayList<>(Arrays.asList(
new TestProxySanitizer("(?<=/subscriptions/)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("(?<=%2Fsubscriptions%2F)([^/?]+)", ZERO_UUID, TestProxySanitizerType.URL),
new TestProxySanitizer("Retry-After", null, "0", TestProxySanitizerType.HEADER),
new TestProxySanitizer("$..secretText", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..keys[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..adminPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..Password", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..accessSAS", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.osProfile.customData", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..administratorLoginPassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..hubDatabasePassword", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasPrimaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..aliasSecondaryConnectionString", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..primaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secondaryReadonlyMasterKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..passwords[*].value", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$..secret", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.siteConfig.machineKey.decryptionKey", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("(?:AccountKey=)(?<accountKey>.*?)(?:;)", REDACTED_VALUE, TestProxySanitizerType.BODY_REGEX).setGroupForReplace("accountKey"),
new TestProxySanitizer("$.properties.WEBSITE_AUTH_ENCRYPTION_KEY", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY),
new TestProxySanitizer("$.properties.DOCKER_REGISTRY_SERVER_PASSWORD", null, REDACTED_VALUE, TestProxySanitizerType.BODY_KEY)
));
sanitizers.addAll(this.sanitizers);
interceptorManager.addSanitizers(sanitizers);
} | class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
} | class of the manager
* @param httpPipeline the http pipeline
* @param profile the azure profile
* @param <T> the type of the manager
* @return the manager instance
* @throws RuntimeException when field cannot be found or set.
*/
protected <T> T buildManager(Class<T> manager, HttpPipeline httpPipeline, AzureProfile profile) {
try {
// NOTE(review): looks up the constructor by the runtime classes of the arguments —
// assumes the manager declares exactly (HttpPipeline, AzureProfile); confirm for subclassed pipelines.
Constructor<T> constructor = manager.getDeclaredConstructor(httpPipeline.getClass(), profile.getClass());
setAccessible(constructor);
return constructor.newInstance(httpPipeline, profile);
} catch (ReflectiveOperationException ex) {
// Reflection failures become unchecked, logged errors.
throw LOGGER.logExceptionAsError(new RuntimeException(ex));
}
}
will shading impact the path here? For example, in the spark connector, we shade the Jackson package. I think when shading happens, the class name will also need to include the shading prefix? | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
    // Best-effort registration of an optional Jackson acceleration module, chosen by JVM version.
    boolean loaded = false;
    if (JAVA_VERSION != -1) {
        if (JAVA_VERSION >= 11) {
            // Blackbird is the preferred accelerator on Java 11+.
            loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
        }
        if (!loaded && JAVA_VERSION < 16) {
            // Afterburner only works up to Java 15.
            loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
        }
    }
    if (!loaded) {
        // Fix: removed the stray "to" ("classpath to for maximum") from the warning text.
        logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
    }
} | loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
// Shared logger, reflection helper constant, and cached JVM feature-release number.
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
// JDK Base64 codecs reused across the client (basic and URL-safe variants).
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
// Two pre-built mappers; the active one is selected via configureSimpleObjectMapper(...).
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
// Time-based (v1) UUID generator — randomUUID() below is time-based despite its name.
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
// Builds the lenient JSON mapper used for service payloads: unknown properties and
// trailing commas tolerated, single quotes allowed, optional strict duplicate detection.
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
// Strict mode: duplicate JSON keys become a parse error instead of last-wins.
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
// Optionally registers Blackbird/Afterburner for faster (de)serialization.
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
// Registers the named Jackson Module on the mapper if the class is on the classpath.
// Returns true only when the module was found, instantiated, and registered.
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
// Intentionally ignored: the module is optional and simply absent from the classpath.
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
// Mapper that renders Duration and Instant via toString() (ISO-8601 text) instead of numbers.
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
    // Returns the Java feature-release number ("1.8" -> 8, "17.0.2" -> 17), or -1 when
    // the "java.version" property is missing or unparseable.
    try {
        String[] parts = System.getProperty("java.version").split("\\.");
        int major = Integer.parseInt(parts[0]);
        // Pre-9 JVMs report "1.x"; the feature number is the second element.
        return major == 1 ? Integer.parseInt(parts[1]) : major;
    } catch (Exception ex) {
        logger.warn("Error while fetching java version", ex);
        return -1;
    }
}
// Wraps the string's UTF-8 bytes in an unpooled Netty ByteBuf; null input yields null.
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
// Encodes the string as UTF-8 bytes; throws NullPointerException on null input.
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
// Base64-encodes the bytes using the basic (non-MIME) JDK encoder.
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
// NOTE(review): the basic JDK Base64 encoder emits no line separators, so this strip
// appears to be defensive dead code — confirm before relying on it.
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
/** Decodes a Base64 string and interprets the resulting bytes as UTF-8 text. */
public static String decodeBase64String(String encodedString) {
    return new String(Base64Decoder.decode(encodedString), StandardCharsets.UTF_8);
}
// URL-decodes the input as UTF-8; null/empty and undecodable inputs are returned unchanged.
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
// UTF-8 is always supported, so this branch is effectively unreachable; fall back to the raw input.
logger.warn("Error while decoding input string", e);
return inputString;
}
}
// URL-safe Base64 encoding without padding characters.
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
// NOTE(review): the JDK URL encoder emits no line separators; this strip looks like dead defensive code.
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
/** Switches the shared mapper between the duplicate-tolerant and strict pre-built variants. */
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
    Utils.simpleObjectMapper = allowDuplicateProperties
        ? Utils.simpleObjectMapperAllowingDuplicatedProperties
        : Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
/**
 * Joins the specified paths by appropriately padding them with '/'
 *
 * @param path1 the first path segment to join.
 * @param path2 the second path segment to join.
 * @return the concatenated path with '/'
 */
public static String joinPath(String path1, String path2) {
    // Result shape is "/a/" for one segment or "/a/b/" for two; segments are
    // stripped of at most one leading and trailing slash before joining.
    StringBuilder joined = new StringBuilder("/")
        .append(trimBeginningAndEndingSlashes(path1))
        .append('/');
    if (!StringUtils.isEmpty(path2)) {
        joined.append(trimBeginningAndEndingSlashes(path2)).append('/');
    }
    return joined.toString();
}
/**
 * Trims the beginning and ending '/' from the given path
 *
 * @param path the path to trim for beginning and ending slashes
 * @return the path without beginning and ending '/'
 */
public static String trimBeginningAndEndingSlashes(String path) {
    // Strips at most one slash from each end; null passes through unchanged.
    if (path == null) {
        return null;
    }
    int start = path.startsWith("/") ? 1 : 0;
    int end = path.endsWith("/") ? path.length() - 1 : path.length();
    // For the single-character input "/" both trims target the same slash; clamp so "/" -> "".
    return path.substring(start, Math.max(start, end));
}
// Builds a query string ("k1=v1&k2=v2") from the map; entries with null/empty keys are
// skipped, and a null value produces a bare key with no '=' part.
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
// Separator between pairs ('&' per RuntimeConstants.Separators.Query).
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
// Key/value separator ('=').
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
// Appends the query (leading '?' stripped if present) to the slash-terminated URL and parses a URI.
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
// No query: still normalize to a trailing slash.
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
 * Given the full path to a resource, extract the collection path.
 *
 * @param resourceFullName the full path to the resource.
 * @return the path of the collection in which the resource is (the prefix up to the fourth
 * '/' after trimming), or the input unchanged when it contains fewer than four segments.
 */
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
// Scan for the fourth '/', which ends "dbs/{db}/colls/{coll}".
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
/** Null-safe size: a null collection counts as empty. */
public static <T> int getCollectionSize(Collection<T> collection) {
    return collection == null ? 0 : collection.size();
}
// True for resource types scoped under a collection; == comparisons keep this null-safe (null -> false).
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
// True for operations that mutate server state; == comparisons keep this null-safe (null -> false).
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
// Ensures the path ends with '/'; null/empty becomes "/" (built from the Url separator chars).
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
// Drops a single leading '?' (the Query separator char) if present; null/empty pass through.
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
// Whether the desired consistency can be requested against an account configured at the
// given backend level: clients may only downgrade, never request stronger than the backend.
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
// STRONG backend accepts every level.
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
// The three weakest levels are mutually interchangeable.
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
// User agent string built from the SDK's own name and version constants.
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
// Formats "azsdk-<name>/<version> <os>/<osVersion> JRE/<javaVersion>"; spaces are
// stripped from the OS name so it stays a single user-agent token.
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
// Returns the currently active shared mapper (strict or lenient, per configureSimpleObjectMapper).
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
// Mapper that serializes Duration/Instant as ISO-8601 strings.
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
 * Returns the current time in RFC 1123 format, e.g.
 * Fri, 01 Dec 2017 19:22:30 GMT.
 *
 * @return the current GMT time as an RFC 1123 formatted string.
 */
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
// NOTE(review): despite the name this returns a time-based (version 1) UUID from the
// shared TIME_BASED_GENERATOR, not a random (version 4) UUID.
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
// Formats the instant in the GMT zone using the RFC 1123 pattern.
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
/** Unboxes {@code val} when present, otherwise returns the supplied default. */
public static int getValueOrDefault(Integer val, int defaultValue) {
    return val == null ? defaultValue : val;
}
// Throws IllegalArgumentException with the given message when the condition is false.
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
// Throws NullPointerException with the given message when the value is null.
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Throws IllegalArgumentException built from the formatted message template when the condition is false.
 *
 * @param value the condition to validate.
 * @param argumentName name of the argument being validated, included in the exception text.
 * @param messageTemplate String.format template for the failure message.
 * @param messageTemplateParams arguments substituted into the template.
 * @throws IllegalArgumentException when {@code value} is false.
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: previously passed argumentName twice and dropped messageTemplate, so the
    // formatted message never contained the caller's template text.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
// Returns null when the condition holds; otherwise an IllegalArgumentException carrying the message.
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
// Template overload: formats the message lazily, only when the condition fails.
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
// Returns null when the value is non-null; otherwise a NullPointerException with the formatted message.
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
// Returns null when the condition holds; otherwise a BadRequestException with the formatted message.
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
    // Safe-cast helper: Class.isInstance(null) is false, so a null input also yields null,
    // matching the explicit null check in the previous formulation.
    return klass.isInstance(i) ? (O) i : null;
}
/**
 * Returns the shared immutable empty list. Uses {@link Collections#emptyList()} so the
 * element type is inferred and the raw {@code EMPTY_LIST} field (and its unchecked
 * suppression) is no longer needed.
 */
public static <V> List<V> immutableListOf() {
    return Collections.emptyList();
}
/**
 * Returns an immutable single-entry map. {@link Collections#singletonMap} already yields
 * an unmodifiable one-entry map, so this avoids allocating a HashMap plus an
 * unmodifiable wrapper while keeping the same contract (put/remove throw
 * UnsupportedOperationException; null key and value are permitted).
 */
public static <K, V> Map<K, V> immutableMapOf(K k1, V v1) {
    return Collections.singletonMap(k1, v1);
}
/** First element of the list, or null when the list is empty (a null list still throws NPE, as before). */
public static <V> V firstOrDefault(List<V> list) {
    return list.isEmpty() ? null : list.get(0);
}
// Mutable single-value holder used to emulate C#-style out-parameters (see tryGetValue/tryRemove).
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
// The held value; written by callees, read by callers.
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
// Out-parameter style lookup: stores the mapped value in the holder and reports presence.
// NOTE: a key explicitly mapped to null is indistinguishable from an absent key (returns false).
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
// Out-parameter style removal: stores the removed value in the holder and reports whether
// a (non-null) mapping was removed.
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
// Deserializes a JSON string into the given type using the shared mapper; null/empty input yields null.
// Parse failures surface as IllegalStateException wrapping the Jackson IOException.
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
// Parses a JSON string into an ObjectNode; null/empty input yields null.
// Note the cast assumes the document's root is a JSON object, not an array or scalar.
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
// Deserializes JSON bytes into the given type; null/empty input yields null.
// The error message decodes the bytes as UTF-8 for readability.
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams);
if (t != null) {
throw t;
}
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} |
```suggestion logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance."); ``` Nit: I think this was a typo. | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath to for maximum Jackson performance.");
}
} | logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath to for maximum Jackson performance."); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
/**
 * Builds a user-agent string of the form
 * {@code <prefix><sdkName>/<sdkVersion> <os>/<osVersion> JRE/<javaVersion>}.
 * Spaces are stripped from the OS name so it stays a single token.
 */
public static String getUserAgent(String sdkName, String sdkVersion) {
    String operatingSystem = System.getProperty("os.name");
    if (operatingSystem == null) {
        operatingSystem = "Unknown";
    }
    operatingSystem = SPACE_PATTERN.matcher(operatingSystem).replaceAll("");
    return UserAgentContainer.AZSDK_USERAGENT_PREFIX
        + sdkName + "/" + sdkVersion
        + " " + operatingSystem + "/" + System.getProperty("os.version")
        + " JRE/" + System.getProperty("java.version");
}
/** Returns the shared general-purpose {@link ObjectMapper} (duplicate-property mode is configurable). */
public static ObjectMapper getSimpleObjectMapper() {
    return Utils.simpleObjectMapper;
}
/** Returns the mapper that serializes {@code Duration}/{@code Instant} via {@code toString()}. */
public static ObjectMapper getDurationEnabledObjectMapper() {
    return durationEnabledObjectMapper;
}
/**
 * Returns the current time formatted per RFC 1123,
 * e.g. {@code Fri, 01 Dec 2017 19:22:30 GMT}.
 *
 * @return the formatted timestamp string
 */
public static String nowAsRFC1123() {
    return RFC_1123_DATE_TIME.format(ZonedDateTime.now(GMT_ZONE_ID));
}
// Time-based UUID (v1-style generator from the uuid library) — NOT cryptographically random.
public static UUID randomUUID() {
    return TIME_BASED_GENERATOR.generate();
}
/** Formats the given instant as an RFC 1123 timestamp in GMT. */
public static String instantAsUTCRFC1123(Instant instant){
    ZonedDateTime utcTime = instant.atZone(GMT_ZONE_ID);
    return RFC_1123_DATE_TIME.format(utcTime);
}
/** Unboxes {@code val}, or returns {@code defaultValue} when it is null. */
public static int getValueOrDefault(Integer val, int defaultValue) {
    if (val == null) {
        return defaultValue;
    }
    return val;
}
/**
 * Throws {@link IllegalArgumentException} with the given message when {@code value} is false.
 *
 * @param value the state condition that must hold
 * @param argumentName name included in the exception message
 * @param message description included in the exception message
 */
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
    if (t != null) {
        throw t;
    }
}
/**
 * Throws {@link NullPointerException} with the given message when {@code val} is null.
 *
 * @param val the reference that must be non-null
 * @param argumentName name included in the exception message
 * @param message description included in the exception message
 */
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
    NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
    if (t != null) {
        throw t;
    }
}
/**
 * Throws {@link IllegalArgumentException} built from the message template when
 * {@code value} is false.
 *
 * @param value the state condition that must hold
 * @param argumentName name included in the exception message
 * @param messageTemplate {@link String#format(String, Object...)} template for the failure text
 * @param messageTemplateParams parameters substituted into {@code messageTemplate}
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: previously passed 'argumentName' where the template belongs,
    // silently dropping 'messageTemplate' from the resulting message.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
/**
 * Returns null when {@code value} holds; otherwise returns (does not throw) an
 * {@link IllegalArgumentException} describing the failed check.
 */
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
    return value
        ? null
        : new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
/**
 * Returns null when {@code value} holds; otherwise returns an
 * {@link IllegalArgumentException} whose message is the formatted template.
 */
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (value) {
        return null;
    }
    String details = String.format(messageTemplate, messageTemplateParams);
    return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, details));
}
// Returns null when 'val' is non-null; otherwise a NullPointerException (not thrown)
// whose message is built from the formatted template.
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (val != null) {
        return null;
    }
    String details = String.format(messageTemplate, messageTemplateParams);
    return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, details));
}
/**
 * Returns null when {@code value} holds; otherwise returns (does not throw) a
 * {@link BadRequestException} whose message is the formatted template.
 */
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (value) {
        return null;
    }
    return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
/**
 * Safe-cast helper: returns {@code i} viewed as {@code klass} when it is an
 * instance of that type, otherwise null (also null for null input).
 * Uses {@link Class#cast(Object)} so no unchecked cast is needed.
 */
public static <O, I> O as(I i, Class<O> klass) {
    return klass.isInstance(i) ? klass.cast(i) : null;
}
/**
 * Returns the shared immutable empty list.
 * Uses the typed {@link Collections#emptyList()} instead of the raw
 * {@code Collections.EMPTY_LIST}, removing the unchecked-cast suppression.
 */
public static <V> List<V> immutableListOf() {
    return Collections.emptyList();
}
/** Returns an unmodifiable single-entry map of {@code k1 -> v1}. */
public static <K, V> Map<K, V> immutableMapOf(K k1, V v1) {
    Map<K, V> singleEntry = new HashMap<>();
    singleEntry.put(k1, v1);
    return Collections.unmodifiableMap(singleEntry);
}
/**
 * Returns the first element of {@code list}, or null when the list is empty.
 * Robustness fix: a null list now also yields null instead of throwing NPE
 * (backward compatible — previously null input always crashed).
 */
public static <V> V firstOrDefault(List<V> list) {
    if (list == null || list.isEmpty()) {
        return null;
    }
    return list.get(0);
}
/**
 * Mutable single-value container used as an "out parameter" by helpers such as
 * {@code tryGetValue}/{@code tryRemove}.
 */
public static class ValueHolder<V> {
    public ValueHolder() {
    }
    public ValueHolder(V v) {
        this.v = v;
    }
    // The held value; intentionally public so callers read/write it directly.
    public V v;
    /** Convenience factory wrapping {@code v} in a new holder. */
    public static <T> ValueHolder<T> initialize(T v) {
        return new ValueHolder<>(v);
    }
}
/**
 * Looks up {@code key}, storing the result (possibly null) in {@code holder}.
 * Returns true only when a non-null value was found.
 */
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
    V found = dictionary.get(key);
    holder.v = found;
    return found != null;
}
/**
 * Removes {@code key}, storing the removed value (possibly null) in {@code holder}.
 * Returns true only when a non-null value was removed.
 */
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
    V removed = dictionary.remove(key);
    holder.v = removed;
    return removed != null;
}
/**
 * Deserializes a JSON string into {@code itemClassType} using the shared mapper.
 *
 * @param itemResponseBodyAsString JSON payload; null/empty yields null
 * @param itemClassType target POJO type
 * @throws IllegalStateException when the payload cannot be deserialized
 */
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
    if (StringUtils.isEmpty(itemResponseBodyAsString)) {
        return null;
    }
    try {
        return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
    } catch (IOException e) {
        // Payload is in memory, so IOException here means malformed JSON, not I/O failure.
        throw new IllegalStateException(
            String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
    }
}
/**
 * Parses a JSON string into an {@link ObjectNode}.
 * Note: the cast assumes the root of the payload is a JSON object; an array or
 * scalar root would throw ClassCastException rather than IllegalStateException.
 *
 * @param itemResponseBodyAsString JSON payload; null/empty yields null
 * @throws IllegalStateException when the payload is not valid JSON
 */
public static ObjectNode parseJson(String itemResponseBodyAsString) {
    if (StringUtils.isEmpty(itemResponseBodyAsString)) {
        return null;
    }
    try {
        return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
    } catch (IOException e) {
        throw new IllegalStateException(
            String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
    }
}
/**
 * Deserializes a UTF-8 JSON byte array into {@code itemClassType}.
 *
 * @param item JSON bytes; null/empty yields null
 * @param itemClassType target POJO type
 * @throws IllegalStateException when the bytes cannot be deserialized
 */
public static <T> T parse(byte[] item, Class<T> itemClassType) {
    if (Utils.isEmpty(item)) {
        return null;
    }
    try {
        return getSimpleObjectMapper().readValue(item, itemClassType);
    } catch (IOException e) {
        // Decode the bytes only for the error message.
        throw new IllegalStateException(
            String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
    }
}
/**
 * Converts a {@link JsonNode} into {@code itemClassType}, using the supplied
 * deserializer or falling back to the default JSON deserializer when null.
 */
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
    if (itemDeserializer == null) {
        return new ItemDeserializer.JsonDeserializer().convert(itemClassType, jsonNode);
    }
    return itemDeserializer.convert(itemClassType, jsonNode);
}
/**
 * Serializes {@code object} to JSON and returns the bytes as a ByteBuffer.
 * The output stream starts at 1 KB and grows as needed.
 *
 * @throws IllegalArgumentException when Jackson fails to serialize the object
 */
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
    try {
        ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
        objectMapper.writeValue(byteBufferOutputStream, object);
        return byteBufferOutputStream.asByteBuffer();
    } catch (IOException e) {
        throw new IllegalArgumentException("Failed to serialize the object into json", e);
    }
}
/** True when the byte array is null or has no elements. */
public static boolean isEmpty(byte[] bytes) {
    if (bytes == null) {
        return true;
    }
    return bytes.length == 0;
}
/**
 * Resolves the effective change-feed request options by merging the paged-flux
 * options into the caller-supplied options via the model bridge.
 *
 * @throws NullPointerException when {@code cosmosChangeFeedRequestRequestOptions} is null
 */
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
    CosmosPagedFluxOptions pagedFluxOptions,
    CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
    checkNotNull(
        cosmosChangeFeedRequestRequestOptions,
        "Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
    return ModelBridgeInternal
        .getEffectiveChangeFeedRequestOptions(
            cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
/**
 * Escapes every non-ASCII character as {@code \\uXXXX} (uppercase hex).
 * Returns the original string instance unchanged when it is pure ASCII,
 * allocating the builder lazily on the first escape.
 */
static String escapeNonAscii(String partitionKeyJson) {
    StringBuilder escaped = null;
    for (int i = 0; i < partitionKeyJson.length(); i++) {
        char c = partitionKeyJson.charAt(i);
        if (c > 127) {
            if (escaped == null) {
                // First non-ASCII char: copy the ASCII prefix seen so far.
                escaped = new StringBuilder(partitionKeyJson.length());
                escaped.append(partitionKeyJson, 0, i);
            }
            escaped.append(String.format("\\u%04X", (int) c));
        } else if (escaped != null) {
            escaped.append(c);
        }
    }
    return escaped == null ? partitionKeyJson : escaped.toString();
}
/**
 * Copies the readable bytes of {@code buf} into a new array.
 * Note: {@code readBytes} advances the buffer's reader index, so the buffer
 * is consumed by this call (Netty ByteBuf semantics).
 */
public static byte[] toByteArray(ByteBuf buf) {
    byte[] bytes = new byte[buf.readableBytes()];
    buf.readBytes(bytes);
    return bytes;
}
/**
 * Serializes the given ObjectNode to its JSON string form.
 *
 * @throws IllegalStateException when Jackson fails to write the node
 */
public static String toJson(ObjectMapper mapper, ObjectNode object) {
    try {
        return mapper.writeValueAsString(object);
    } catch (JsonProcessingException e) {
        throw new IllegalStateException("Unable to convert JSON to STRING", e);
    }
}
/**
 * Validates and returns the configured max integrated-cache staleness in ms.
 *
 * @throws NullPointerException when no staleness duration is configured
 *         (previously an anonymous NPE from dereferencing the null Duration)
 * @throws IllegalArgumentException when the duration is sub-millisecond or negative
 */
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
    Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
    if (maxIntegratedCacheStaleness == null) {
        throw new NullPointerException("MaxIntegratedCacheStaleness must not be null");
    }
    long millis = maxIntegratedCacheStaleness.toMillis();
    // A positive duration that truncates to 0 ms is finer than the supported granularity.
    if (maxIntegratedCacheStaleness.toNanos() > 0 && millis <= 0) {
        throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
    }
    if (millis < 0) {
        throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
    }
    return millis;
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
// Runtime Class object for byte[], used for reflective type checks elsewhere.
public static final Class<?> byteArrayClass = new byte[0].getClass();
// Major Java version (8, 11, ...), or -1 when detection failed.
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
// Shared JDK Base64 codecs (basic and URL-safe variants).
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
// Two pre-built mappers; configureSimpleObjectMapper() flips between them.
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
// Mutable on purpose: selected by configureSimpleObjectMapper().
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
// RFC 1123 formatter pinned to US locale so day/month names are stable.
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
/**
 * Builds the shared lenient JSON mapper: unknown properties and trailing commas
 * are tolerated, single quotes accepted, floats are never silently coerced to ints.
 *
 * @param allowDuplicateProperties when false, duplicate JSON keys fail fast
 */
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
    objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
    if (!allowDuplicateProperties) {
        objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
    }
    objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
    // Best-effort registration of Afterburner/Blackbird for faster (de)serialization.
    tryToLoadJacksonPerformanceLibrary(objectMapper);
    objectMapper.registerModule(new JavaTimeModule());
    return objectMapper;
}
/**
 * Reflectively instantiates and registers the named Jackson module.
 * Returns true on success, false when the module is absent or fails to load.
 */
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
    try {
        Class<?> clazz = Class.forName(className);
        Module module = (Module)clazz.getDeclaredConstructor().newInstance();
        objectMapper.registerModule(module);
        return true;
    } catch (ClassNotFoundException e) {
        // Intentionally silent: the performance module is an optional dependency.
    } catch (Exception e) {
        logger.warn("Issues loading Jackson performance module " + className, e);
    }
    return false;
}
/**
 * Builds a mapper that serializes {@link Duration} and {@link Instant} via
 * their {@code toString()} (ISO-8601) forms instead of Jackson's defaults.
 */
private static ObjectMapper createAndInitializeDurationObjectMapper() {
    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.registerModule(new SimpleModule()
        .addSerializer(Duration.class, ToStringSerializer.instance)
        .addSerializer(Instant.class, ToStringSerializer.instance));
    return objectMapper;
}
/**
 * Parses the major Java version from the {@code java.version} property,
 * handling the legacy "1.x" scheme (pre-9). Returns -1 on any failure.
 */
private static int getJavaVersion() {
    int version = -1;
    try {
        String javaVersionProperty = System.getProperty("java.version");
        String[] parts = javaVersionProperty.split("\\.");
        int major = Integer.parseInt(parts[0]);
        // "1.8.0_x" -> 8; "11.0.2" -> 11.
        version = (major == 1) ? Integer.parseInt(parts[1]) : major;
        return version;
    } catch (Exception ex) {
        logger.warn("Error while fetching java version", ex);
        return version;
    }
}
/** Wraps the string's UTF-8 bytes in an unpooled ByteBuf, or returns null for null input. */
public static ByteBuf getUTF8BytesOrNull(String str) {
    return str == null
        ? null
        : Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
/** Encodes the string as UTF-8 bytes (throws NPE for null input). */
public static byte[] getUTF8Bytes(String str) {
    return str.getBytes(StandardCharsets.UTF_8);
}
/**
 * Base64-encodes the bytes using the basic (padded) alphabet.
 * The trailing-CRLF strip is defensive: java.util.Base64's basic encoder
 * does not emit line separators.
 */
public static String encodeBase64String(byte[] binaryData) {
    String encoded = Base64Encoder.encodeToString(binaryData);
    return encoded.endsWith("\r\n")
        ? encoded.substring(0, encoded.length() - 2)
        : encoded;
}
/** Decodes a Base64 string and interprets the resulting bytes as UTF-8 text. */
public static String decodeBase64String(String encodedString) {
    return new String(Base64Decoder.decode(encodedString), StandardCharsets.UTF_8);
}
/**
 * URL-decodes the input as UTF-8 (e.g. {@code %20} -> space).
 * On failure the original string is returned unchanged; the
 * UnsupportedEncodingException branch is effectively unreachable since UTF-8
 * is always supported, but the API forces the catch.
 */
public static String decodeAsUTF8String(String inputString) {
    if (inputString == null || inputString.isEmpty()) {
        return inputString;
    }
    try {
        return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
    } catch (UnsupportedEncodingException e) {
        logger.warn("Error while decoding input string", e);
        return inputString;
    }
}
/**
 * Base64url-encodes the bytes without padding (RFC 4648 URL-safe alphabet).
 * The CRLF strip is defensive; the JDK URL encoder emits no line separators.
 */
public static String encodeUrlBase64String(byte[] binaryData) {
    String encoded = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
    return encoded.endsWith("\r\n")
        ? encoded.substring(0, encoded.length() - 2)
        : encoded;
}
/**
 * Selects which pre-built shared mapper subsequent {@code getSimpleObjectMapper()}
 * calls return: duplicate-key-tolerant or strict.
 */
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
    Utils.simpleObjectMapper = allowDuplicateProperties
        ? Utils.simpleObjectMapperAllowingDuplicatedProperties
        : Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
/**
 * Joins the specified paths by appropriately padding them with '/'.
 *
 * @param path1 the first path segment to join.
 * @param path2 the second path segment to join (skipped when null/empty).
 * @return the concatenated path, always starting and ending with '/'
 */
public static String joinPath(String path1, String path2) {
    StringBuilder joined = new StringBuilder("/");
    joined.append(trimBeginningAndEndingSlashes(path1)).append('/');
    if (!StringUtils.isEmpty(path2)) {
        joined.append(trimBeginningAndEndingSlashes(path2)).append('/');
    }
    return joined.toString();
}
/**
 * Trims one leading and one trailing '/' from the given path.
 *
 * @param path the path to trim; null passes through as null
 * @return the path without a single beginning/ending '/'
 */
public static String trimBeginningAndEndingSlashes(String path) {
    if (path == null) {
        return null;
    }
    String trimmed = path;
    if (trimmed.startsWith("/")) {
        trimmed = trimmed.substring(1);
    }
    if (trimmed.endsWith("/")) {
        trimmed = trimmed.substring(0, trimmed.length() - 1);
    }
    return trimmed;
}
/**
 * Builds a query string from the given name/value pairs, skipping null/empty
 * keys; a null value emits the key alone. Separators come from
 * RuntimeConstants.Separators.Query — presumably [1] is '&' and [2] is '=';
 * TODO(review): confirm against RuntimeConstants.
 *
 * @param queryParameters the pairs to serialize; null yields ""
 */
public static String createQuery(Map<String, String> queryParameters) {
    if (queryParameters == null)
        return "";
    StringBuilder queryString = new StringBuilder();
    for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
        String key = nameValuePair.getKey();
        String value = nameValuePair.getValue();
        if (key != null && !key.isEmpty()) {
            // Pair separator only between entries, not before the first.
            if (queryString.length() > 0) {
                queryString.append(RuntimeConstants.Separators.Query[1]);
            }
            queryString.append(key);
            if (value != null) {
                queryString.append(RuntimeConstants.Separators.Query[2]);
                queryString.append(value);
            }
        }
    }
    return queryString.toString();
}
/**
 * Appends the given query (leading '?' stripped if present) to the URL,
 * ensuring the path part ends with '/'.
 *
 * @throws IllegalStateException when {@code urlString} is null or the result is not a valid URI
 */
public static URI setQuery(String urlString, String query) {
    if (urlString == null)
        throw new IllegalStateException("urlString parameter can't be null.");
    query = Utils.removeLeadingQuestionMark(query);
    try {
        if (query != null && !query.isEmpty()) {
            return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
        } else {
            // No query: just normalize the trailing slash.
            return new URI(Utils.addTrailingSlash(urlString));
        }
    } catch (URISyntaxException e) {
        throw new IllegalStateException("Uri is invalid: ", e);
    }
}
/**
 * Given the full path to a resource, extracts the collection path — the prefix
 * before the fourth '/' of the slash-trimmed path (e.g.
 * {@code dbs/db1/colls/c1/docs/d1} -> {@code dbs/db1/colls/c1}).
 *
 * @param resourceFullName the full path to the resource; null passes through
 * @return the collection path, or the trimmed input when it has fewer than four '/'
 */
public static String getCollectionName(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    String trimmed = Utils.trimBeginningAndEndingSlashes(resourceFullName);
    int slashPos = -1;
    for (int found = 0; found < 4; found++) {
        slashPos = trimmed.indexOf('/', slashPos + 1);
        if (slashPos < 0) {
            // Fewer than four segments separators: already a collection-level path.
            return trimmed;
        }
    }
    return trimmed.substring(0, slashPos);
}
/** Null-safe size: 0 for a null collection. */
public static <T> int getCollectionSize(Collection<T> collection) {
    return collection == null ? 0 : collection.size();
}
/** True when the resource type lives underneath a collection. */
public static boolean isCollectionChild(ResourceType type) {
    if (type == null) {
        return false;
    }
    switch (type) {
        case Document:
        case Attachment:
        case Conflict:
        case StoredProcedure:
        case Trigger:
        case UserDefinedFunction:
            return true;
        default:
            return false;
    }
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Throws {@link IllegalArgumentException} built from the message template when
 * {@code value} is false.
 *
 * @param value the state condition that must hold
 * @param argumentName name included in the exception message
 * @param messageTemplate {@link String#format(String, Object...)} template for the failure text
 * @param messageTemplateParams parameters substituted into {@code messageTemplate}
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: previously passed 'argumentName' where the template belongs,
    // silently dropping 'messageTemplate' from the resulting message.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} |
If it were shaded, the class would be named `<shade>.com.fasterxml.jackson.module.afterburner.AfterburnerModule`, but the code today does not refer to it by that name. | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath to for maximum Jackson performance.");
}
} | loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams);
if (t != null) {
throw t;
}
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams);
if (t != null) {
throw t;
}
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} |
Fixed - commit pushed | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath to for maximum Jackson performance.");
}
} | logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath to for maximum Jackson performance."); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
// Major Java version detected once at class load; -1 when detection failed.
private static final int JAVA_VERSION = getJavaVersion();
// Initial capacity for JSON serialization buffers (see serializeJsonToByteBuffer).
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
// Shared Base64 codecs; java.util.Base64 encoders/decoders are immutable and thread-safe.
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
// Two pre-built mappers differing only in duplicate-JSON-property tolerance;
// configureSimpleObjectMapper(...) selects which one 'simpleObjectMapper' points at.
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
// Mutable on purpose: reassigned by configureSimpleObjectMapper(...).
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
// Time-based (version 1) UUID generator backing randomUUID().
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
// RFC 1123 formatter; DateTimeFormatter is thread-safe, so caching it is safe.
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
// Builds a lenient JSON mapper: unknown properties ignored, single quotes and trailing
// commas tolerated; duplicate-property detection is strict unless explicitly allowed.
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
// Reject floats where integers are expected rather than silently truncating.
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
// Reflectively instantiates and registers an optional Jackson module.
// Returns true when the module was found and registered, false otherwise.
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
// Intentionally swallowed: the module is optional and may simply not be on the classpath.
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
// Mapper that serializes Duration and Instant via their toString() representation.
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
/**
 * Parses the "java.version" system property into the major Java version.
 * Pre-9 JVMs report "1.x.y", in which case the second element is the version.
 *
 * @return the major version, or -1 when the property cannot be parsed.
 */
private static int getJavaVersion() {
    try {
        String[] parts = System.getProperty("java.version").split("\\.");
        int major = Integer.parseInt(parts[0]);
        return major == 1 ? Integer.parseInt(parts[1]) : major;
    } catch (Exception ex) {
        logger.warn("Error while fetching java version", ex);
        return -1;
    }
}
/**
 * Wraps the UTF-8 encoding of {@code str} in an unpooled ByteBuf.
 *
 * @param str the text to encode; may be null.
 * @return the wrapping buffer, or null when {@code str} is null.
 */
public static ByteBuf getUTF8BytesOrNull(String str) {
    return str == null
        ? null
        : Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
// Encodes the string as UTF-8 bytes (explicit charset, platform-independent).
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
// Base64-encodes the bytes. The trailing-CRLF strip is defensive: MIME-style encoders
// append "\r\n", although the basic encoder used here does not emit line separators.
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
/**
 * Decodes a Base64 string and interprets the resulting bytes as UTF-8 text.
 *
 * @param encodedString the Base64 input.
 * @return the decoded text.
 */
public static String decodeBase64String(String encodedString) {
    return new String(Base64Decoder.decode(encodedString), StandardCharsets.UTF_8);
}
// URL-decodes the input using UTF-8. Returns the input unchanged when it is
// null/empty or when decoding fails (best effort; the failure is logged).
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
// Unreachable in practice: UTF-8 is always a supported charset name.
logger.warn("Error while decoding input string", e);
return inputString;
}
}
// URL-safe, unpadded Base64 encoding; the CRLF strip is defensive (see encodeBase64String).
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
/**
 * Switches the shared simple ObjectMapper between the duplicate-tolerant and the
 * strict (duplicate-rejecting) pre-built instances.
 *
 * @param allowDuplicateProperties true to tolerate duplicate JSON properties.
 */
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
    Utils.simpleObjectMapper = allowDuplicateProperties
        ? Utils.simpleObjectMapperAllowingDuplicatedProperties
        : Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
/**
 * Joins the specified paths by appropriately padding them with '/'
 *
 * @param path1 the first path segment to join.
 * @param path2 the second path segment to join.
 * @return the concatenated path with '/'
 */
public static String joinPath(String path1, String path2) {
    StringBuilder joined = new StringBuilder("/")
        .append(trimBeginningAndEndingSlashes(path1))
        .append('/');
    if (!StringUtils.isEmpty(path2)) {
        joined.append(trimBeginningAndEndingSlashes(path2)).append('/');
    }
    return joined.toString();
}
/**
 * Trims the beginning and ending '/' from the given path
 *
 * @param path the path to trim for beginning and ending slashes
 * @return the path without beginning and ending '/'
 */
public static String trimBeginningAndEndingSlashes(String path) {
    if (path == null) {
        return null;
    }
    // Strip at most one slash from each end.
    int start = path.startsWith("/") ? 1 : 0;
    int end = path.length();
    if (end > start && path.charAt(end - 1) == '/') {
        end--;
    }
    return path.substring(start, end);
}
// Serializes the map into a query string of key/value pairs. Entries with a null or
// empty key are skipped; a null value emits the key alone (no value separator).
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
// Separators.Query[1] is the pair separator (presumably '&' -- TODO confirm in RuntimeConstants).
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
// Separators.Query[2] is the key/value separator (presumably '=').
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
// Appends the query (any leading '?' removed first) to the slash-terminated URL.
// Throws IllegalStateException when urlString is null or the result is not a valid URI.
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
 * Given the full path to a resource, extract the collection path.
 *
 * @param resourceFullName the full path to the resource.
 * @return the path of the collection in which the resource is.
 */
// Returns the prefix up to (excluding) the 4th '/' of the slash-trimmed path; when the
// path has fewer than 4 slashes, the trimmed input itself is returned.
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
/**
 * Returns the size of the collection, treating null as empty.
 *
 * @param collection the collection to measure; may be null.
 * @return the element count, or 0 for null.
 */
public static <T> int getCollectionSize(Collection<T> collection) {
    return collection != null ? collection.size() : 0;
}
/**
 * True when the resource type denotes an entity that lives inside a collection.
 * Returns false for null input (matching the original reference comparisons).
 */
public static boolean isCollectionChild(ResourceType type) {
    if (type == null) {
        return false;
    }
    switch (type) {
        case Document:
        case Attachment:
        case Conflict:
        case StoredProcedure:
        case Trigger:
        case UserDefinedFunction:
            return true;
        default:
            return false;
    }
}
/**
 * True when the operation mutates server-side state (create/upsert/delete/replace,
 * stored-procedure execution, or batch). Returns false for null input.
 */
public static boolean isWriteOperation(OperationType operationType) {
    if (operationType == null) {
        return false;
    }
    switch (operationType) {
        case Create:
        case Upsert:
        case Delete:
        case Replace:
        case ExecuteJavaScript:
        case Batch:
            return true;
        default:
            return false;
    }
}
// Ensures the path ends with the URL separator; null/empty input becomes the
// separator string itself.
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
/**
 * Strips a single leading query marker from the path, when present.
 * Null and empty inputs are returned unchanged.
 */
private static String removeLeadingQuestionMark(String path) {
    boolean startsWithMarker =
        path != null && !path.isEmpty() && path.charAt(0) == RuntimeConstants.Separators.Query[0];
    return startsWithMarker ? path.substring(1) : path;
}
// True when the desired consistency level is equal to or weaker than what the backend
// account offers (strong > bounded staleness > session/eventual/consistent prefix).
// Throws IllegalArgumentException for an unrecognized backend level.
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
// Convenience overload using this SDK's default name and version.
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
// Builds the user-agent string "<prefix><sdk>/<ver> <os>/<osver> JRE/<javaver>".
// Whitespace is removed from the OS name so it forms a single token.
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
// Returns the currently selected shared lenient mapper (see configureSimpleObjectMapper).
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
// Returns the mapper that serializes Duration/Instant via their toString() form.
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
 * Returns Current Time in RFC 1123 format, e.g,
 * Fri, 01 Dec 2017 19:22:30 GMT.
 *
 * @return an instance of STRING
 */
public static String nowAsRFC1123() {
    return Utils.RFC_1123_DATE_TIME.format(ZonedDateTime.now(GMT_ZONE_ID));
}
// NOTE: despite the name this returns a time-based (version 1) UUID, not a
// cryptographically random one.
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
// Formats the instant as an RFC 1123 timestamp in the GMT zone.
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
/**
 * Unboxes the Integer, substituting {@code defaultValue} when it is null.
 */
public static int getValueOrDefault(Integer val, int defaultValue) {
    if (val == null) {
        return defaultValue;
    }
    return val;
}
// Throws an IllegalArgumentException naming the argument when the state check fails.
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
// Throws a NullPointerException naming the argument when the value is null.
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Throws an IllegalArgumentException built from the formatted message template
 * when the state check fails.
 *
 * @param value the state predicate that must hold.
 * @param argumentName name of the argument reported in the exception message.
 * @param messageTemplate String.format template for the failure message.
 * @param messageTemplateParams arguments for the template.
 * @throws IllegalArgumentException when {@code value} is false.
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: previously 'argumentName' was passed where the message template belongs,
    // so 'messageTemplate' was silently ignored and the exception text was wrong.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
// Returns null when the state holds, otherwise an IllegalArgumentException describing it.
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
// Template-based variant: formats the message with String.format before wrapping it.
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
// Returns null when the value is non-null, otherwise a NullPointerException describing it.
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
// Returns null when the request predicate holds, otherwise a BadRequestException.
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
/**
 * Null-safe cast: returns {@code i} as {@code klass} when it is an instance of that
 * type, otherwise null (null input also yields null).
 */
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
    // isInstance(null) is false, so null input falls through to null.
    return klass.isInstance(i) ? (O) i : null;
}
/**
 * Returns the shared immutable empty list.
 * Uses {@link Collections#emptyList()} for type safety instead of the raw
 * {@code Collections.EMPTY_LIST}, removing the unchecked conversion (and the
 * need for {@code @SuppressWarnings}).
 */
public static <V> List<V> immutableListOf() {
    return Collections.emptyList();
}
/**
 * Builds an unmodifiable single-entry map.
 *
 * @return an immutable map containing exactly the pair (k1, v1).
 */
public static <K, V> Map<K, V> immutableMapOf(K k1, V v1) {
    HashMap<K, V> entries = new HashMap<>();
    entries.put(k1, v1);
    return Collections.unmodifiableMap(entries);
}
/**
 * Returns the first element of the list, or null when the list is empty.
 * Robustness fix: a null list now also yields null (previously it threw
 * NullPointerException, contradicting the "OrDefault" contract).
 */
public static <V> V firstOrDefault(List<V> list) {
    return (list == null || list.isEmpty()) ? null : list.get(0);
}
// Mutable single-value holder used to emulate out-parameters (see tryGetValue/tryRemove).
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
// The held value; intentionally public for direct read/write access.
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
// Looks up key and stores the result in holder; returns true only for non-null values.
// NOTE: a key explicitly mapped to null is indistinguishable from an absent key.
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
// Removes key and stores the removed value in holder; returns true only when a
// non-null value was removed (null mappings look like absent keys).
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
// Deserializes the JSON string into itemClassType; returns null for null/empty input.
// Throws IllegalStateException wrapping any parse failure.
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
// Parses the JSON string into an ObjectNode; returns null for null/empty input.
// NOTE: the cast assumes the top-level JSON value is an object -- a JSON array or
// scalar would raise ClassCastException here, not the IllegalStateException below.
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
// Deserializes the UTF-8 JSON bytes into itemClassType; returns null for null/empty input.
// Throws IllegalStateException wrapping any parse failure.
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
// Converts the JsonNode via the supplied deserializer, falling back to plain
// Jackson conversion when none is provided.
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
// Serializes the object to JSON into a ByteBuffer (initial capacity 1 KB; the
// underlying stream grows as needed). Throws IllegalArgumentException on failure.
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
/**
 * True when the byte array is null or contains no elements.
 */
public static boolean isEmpty(byte[] bytes) {
    if (bytes == null) {
        return true;
    }
    return bytes.length == 0;
}
// Merges paging state from the PagedFlux options into the change-feed request options.
// Throws NullPointerException when cosmosChangeFeedRequestRequestOptions is null.
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
// Escapes every character above 0x7F as a \\uXXXX sequence. The StringBuilder is
// allocated lazily, so the original String instance is returned unchanged when the
// input is pure ASCII.
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
// First non-ASCII char found: copy the ASCII prefix seen so far.
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
// Drains the buffer's readable bytes into a fresh array; note this advances the
// buffer's reader index (the buffer is consumed).
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
// Serializes the ObjectNode to its JSON text; wraps failures in IllegalStateException.
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
// Returns the configured max integrated cache staleness in whole milliseconds.
// Rejects positive sub-millisecond durations (unsupported granularity) and
// negative durations.
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams);
if (t != null) {
throw t;
}
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
/**
 * C#-style TryRemove: removes {@code key} from the map, stores the removed
 * value in {@code holder}, and reports whether a non-null value was removed.
 * <p>
 * As with {@code tryGetValue}, a key that was mapped to {@code null} yields
 * {@code false} even though the entry is removed.
 */
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
    final V removed = dictionary.remove(key);
    holder.v = removed;
    return removed != null;
}
/**
 * Deserializes a JSON string into an instance of {@code itemClassType} using
 * the shared simple object mapper.
 *
 * @return the deserialized POJO, or {@code null} for a null/empty input.
 * @throws IllegalStateException when the payload cannot be parsed.
 */
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
    T result = null;
    if (!StringUtils.isEmpty(itemResponseBodyAsString)) {
        try {
            result = getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
        } catch (IOException e) {
            throw new IllegalStateException(
                String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
        }
    }
    return result;
}
/**
 * Parses a JSON string into an {@code ObjectNode} using the shared simple
 * object mapper.
 *
 * @return the parsed node, or {@code null} for a null/empty input.
 * @throws IllegalStateException when the payload cannot be parsed.
 */
public static ObjectNode parseJson(String itemResponseBodyAsString) {
    ObjectNode result = null;
    if (!StringUtils.isEmpty(itemResponseBodyAsString)) {
        try {
            // Cast fails (ClassCastException) if the root is not a JSON object,
            // matching the original behavior.
            result = (ObjectNode) getSimpleObjectMapper().readTree(itemResponseBodyAsString);
        } catch (IOException e) {
            throw new IllegalStateException(
                String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
        }
    }
    return result;
}
/**
 * Deserializes a UTF-8 JSON byte array into an instance of
 * {@code itemClassType} using the shared simple object mapper.
 *
 * @return the deserialized POJO, or {@code null} for a null/empty array.
 * @throws IllegalStateException when the payload cannot be parsed.
 */
public static <T> T parse(byte[] item, Class<T> itemClassType) {
    T result = null;
    if (!Utils.isEmpty(item)) {
        try {
            result = getSimpleObjectMapper().readValue(item, itemClassType);
        } catch (IOException e) {
            throw new IllegalStateException(
                String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
        }
    }
    return result;
}
/**
 * Converts a {@code JsonNode} into {@code itemClassType} via the supplied
 * {@code ItemDeserializer}; falls back to the default JSON deserializer when
 * {@code itemDeserializer} is {@code null}.
 */
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
    if (itemDeserializer == null) {
        return new ItemDeserializer.JsonDeserializer().convert(itemClassType, jsonNode);
    }
    return itemDeserializer.convert(itemClassType, jsonNode);
}
/**
 * Serializes {@code object} to JSON with the given mapper and returns the
 * result as a {@code ByteBuffer} (initial capacity ONE_KB, grown as needed by
 * the underlying stream).
 *
 * @throws IllegalArgumentException when serialization fails.
 */
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
    try {
        ByteBufferOutputStream jsonStream = new ByteBufferOutputStream(ONE_KB);
        objectMapper.writeValue(jsonStream, object);
        return jsonStream.asByteBuffer();
    } catch (IOException e) {
        throw new IllegalArgumentException("Failed to serialize the object into json", e);
    }
}
/**
 * Reports whether the byte array is {@code null} or has zero length.
 */
public static boolean isEmpty(byte[] bytes) {
    if (bytes == null) {
        return true;
    }
    return bytes.length == 0;
}
/**
 * Validates the change-feed request options and delegates to
 * {@code ModelBridgeInternal} to combine them with the paged-flux options.
 *
 * @param pagedFluxOptions the per-page options to merge in.
 * @param cosmosChangeFeedRequestRequestOptions the request options; must not be null.
 * @return the effective change-feed request options produced by the bridge.
 */
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
// checkNotNull is statically imported elsewhere in the file; presumably it
// throws on null with the message below — NOTE(review): confirm which
// checkNotNull is imported.
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
/**
 * Copies the readable bytes of the buffer into a new array.
 * <p>
 * NOTE(review): readBytes appears to consume the buffer (advancing its reader
 * index by readableBytes()) per the Netty ByteBuf contract — confirm callers
 * do not re-read the buffer afterwards.
 *
 * @param buf the source buffer; its readable region is drained.
 * @return a new array containing the bytes that were readable.
 */
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
/**
 * Serializes the given {@code ObjectNode} to its JSON string form.
 *
 * @throws IllegalStateException when Jackson fails to serialize the node.
 */
public static String toJson(ObjectMapper mapper, ObjectNode object) {
    String json;
    try {
        json = mapper.writeValueAsString(object);
    } catch (JsonProcessingException e) {
        throw new IllegalStateException("Unable to convert JSON to STRING", e);
    }
    return json;
}
/**
 * Extracts the max integrated cache staleness as whole milliseconds, rejecting
 * values that cannot be expressed at millisecond granularity.
 * <p>
 * NOTE(review): assumes getMaxIntegratedCacheStaleness() returns non-null —
 * a NullPointerException results otherwise; confirm callers guarantee this.
 *
 * @param dedicatedGatewayRequestOptions options carrying the staleness duration.
 * @return the staleness in milliseconds.
 * @throws IllegalArgumentException when the duration is positive but under one
 *         millisecond, or when it is negative.
 */
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
// Positive but less than 1ms: only whole milliseconds are supported.
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
// Negative durations are rejected (checked second: a negative value fails the
// toNanos() > 0 guard above, so it reaches this branch).
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} |
The code currently uses `new AfterburnerModule()` directly, so it will still work with shading. But when we change to referring to it by class name, I think we need a way to resolve it properly even when shading happens... | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath to for maximum Jackson performance.");
}
} | loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams);
if (t != null) {
throw t;
}
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
/**
 * Returns true when {@code desiredConsistency} can be served by an account
 * whose backend consistency is {@code backendConsistency}: a client may only
 * request the backend level itself or a weaker level.
 *
 * @param backendConsistency the account's configured consistency level
 * @param desiredConsistency the consistency level requested by the client
 * @return true when the requested level is allowed
 * @throws IllegalArgumentException for an unrecognized backend level
 */
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
// STRONG is the strongest backend level, so every desired level is allowed.
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
// Everything except STRONG.
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
// The three weakest levels are mutually substitutable here.
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
/**
 * Builds the default user-agent string from the SDK's own name and version.
 */
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
/**
 * Builds a user-agent string of the form
 * "{prefix}{sdkName}/{sdkVersion} {osName}/{osVersion} JRE/{javaVersion}".
 *
 * @param sdkName    the SDK component name to report
 * @param sdkVersion the SDK version to report
 * @return the formatted user-agent string
 */
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
// Strip all whitespace so the OS name forms a single token in the UA string.
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
/** Returns the shared mapper selected via {@code configureSimpleObjectMapper}. */
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
/** Returns the mapper that serializes Duration/Instant via their toString form. */
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
 * Returns the current time in RFC 1123 format, e.g.
 * Fri, 01 Dec 2017 19:22:30 GMT.
 *
 * @return the formatted current time in the GMT zone
 */
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
/** Returns a time-based UUID (despite the name, not a random/version-4 UUID). */
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
/** Formats the given instant as RFC 1123 in the GMT zone. */
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
/**
 * Unboxes {@code val}, substituting {@code defaultValue} when it is null.
 *
 * @param val          the possibly-null boxed value
 * @param defaultValue the fallback used when {@code val} is null
 * @return {@code val} when present, otherwise {@code defaultValue}
 */
public static int getValueOrDefault(Integer val, int defaultValue) {
    if (val == null) {
        return defaultValue;
    }
    return val;
}
/**
 * Validates {@code value}; throws an IllegalArgumentException combining
 * {@code argumentName} and {@code message} when the check fails.
 *
 * @throws IllegalArgumentException when {@code value} is false
 */
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Validates that {@code val} is non-null; throws a NullPointerException
 * combining {@code argumentName} and {@code message} otherwise.
 *
 * @throws NullPointerException when {@code val} is null
 */
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Validates {@code value}; throws an IllegalArgumentException whose message
 * combines {@code argumentName} with the formatted message template.
 *
 * @param value                 the condition that must hold
 * @param argumentName          the argument being validated
 * @param messageTemplate       a {@link String#format} template for the failure message
 * @param messageTemplateParams arguments for the template
 * @throws IllegalArgumentException when {@code value} is false
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: previously passed argumentName in place of messageTemplate, so the
    // caller's template (and its parameters) never appeared in the exception message.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
/**
 * Builds (without throwing) an IllegalArgumentException describing a failed
 * state check; returns null when the check passes.
 *
 * @param value        the condition that must hold
 * @param argumentName the argument being validated
 * @param message      the failure description
 * @return null on success, otherwise the exception to throw
 */
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
    if (!value) {
        return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
    }
    return null;
}
/**
 * Builds (without throwing) an IllegalArgumentException describing a failed
 * state check, formatting the message from a template; returns null when the
 * check passes.
 *
 * @param value                 the condition that must hold
 * @param argumentName          the argument being validated
 * @param messageTemplate       a {@link String#format} template for the failure message
 * @param messageTemplateParams arguments for the template
 * @return null on success, otherwise the exception to throw
 */
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (!value) {
        String details = String.format(messageTemplate, messageTemplateParams);
        return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, details));
    }
    return null;
}
/**
 * Builds (without throwing) a NullPointerException for a null {@code val};
 * returns null when {@code val} is present. The message combines
 * {@code argumentName} with the formatted message template.
 */
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
/**
 * Builds (without throwing) a BadRequestException when {@code value} is false;
 * returns null when the check passes.
 */
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
/**
 * Null-safe cast helper: returns {@code i} cast to {@code klass} when it is an
 * instance of that type, otherwise null (a null input also yields null).
 */
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
    // Class.isInstance(null) is false, so a null input falls through to null.
    return klass.isInstance(i) ? (O) i : null;
}
/**
 * Returns a shared immutable empty list.
 *
 * @return an immutable empty list
 */
public static <V> List<V> immutableListOf() {
    // Collections.emptyList() is the type-safe equivalent of the raw
    // Collections.EMPTY_LIST previously returned (same shared instance),
    // and removes the need for @SuppressWarnings("unchecked").
    return Collections.emptyList();
}

/**
 * Returns an immutable single-entry map.
 *
 * @param k1 the sole key
 * @param v1 the sole value
 * @return an immutable map containing exactly the given entry
 */
public static <K, V> Map<K, V> immutableMapOf(K k1, V v1) {
    // singletonMap avoids allocating a full HashMap plus an unmodifiable
    // wrapper for a single entry.
    return Collections.singletonMap(k1, v1);
}

/**
 * Returns the first element of the list, or null when the list is empty.
 * The list itself must be non-null.
 */
public static <V> V firstOrDefault(List<V> list) {
    return list.isEmpty() ? null : list.get(0);
}
/**
 * Minimal mutable wrapper used as an out-parameter (e.g. by tryGetValue and
 * tryRemove) since Java has no by-reference arguments.
 */
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
// The held value; intentionally public and mutable so callees can set it.
public V v;
/** Convenience factory wrapping {@code v} in a new holder. */
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
/**
 * Looks up {@code key} in {@code dictionary}, storing the mapped value (or
 * null) in {@code holder}. Returns true only when a non-null mapping exists;
 * a null-valued mapping is indistinguishable from an absent key.
 */
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
    V found = dictionary.get(key);
    holder.v = found;
    return found != null;
}

/**
 * Removes {@code key} from {@code dictionary}, storing the removed value (or
 * null) in {@code holder}. Returns true only when a non-null value was removed.
 */
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
    V removed = dictionary.remove(key);
    holder.v = removed;
    return removed != null;
}
/**
 * Deserializes a JSON string into the given POJO type.
 * Returns null for a null/empty input string.
 *
 * @throws IllegalStateException wrapping any Jackson IOException
 */
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
/**
 * Parses a JSON string into an ObjectNode.
 * Returns null for a null/empty input string.
 * Note: the cast fails (ClassCastException) if the root is not a JSON object.
 *
 * @throws IllegalStateException wrapping any Jackson IOException
 */
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
/**
 * Deserializes a JSON byte array into the given POJO type.
 * Returns null for a null/empty array.
 *
 * @throws IllegalStateException wrapping any Jackson IOException
 */
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
/**
 * Converts an already-parsed JsonNode into the given type, falling back to a
 * default JSON deserializer when {@code itemDeserializer} is null.
 */
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
/**
 * Serializes {@code object} to JSON and returns the bytes as a ByteBuffer.
 * The backing stream starts at ONE_KB and grows as needed.
 *
 * @throws IllegalArgumentException wrapping any serialization IOException
 */
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
/**
 * Returns true when the byte array is null or has length zero.
 */
public static boolean isEmpty(byte[] bytes) {
    if (bytes == null) {
        return true;
    }
    return bytes.length == 0;
}
/**
 * Merges paged-flux options into the change-feed request options via
 * ModelBridgeInternal.
 *
 * @throws NullPointerException (from checkNotNull) when the request options are null
 */
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
/**
 * Replaces every character above U+007F with its {@code \}uXXXX escape
 * (uppercase hex, zero-padded to four digits). When the input is pure ASCII
 * the original string instance is returned unchanged, avoiding allocation.
 */
static String escapeNonAscii(String partitionKeyJson) {
    StringBuilder escaped = null;
    for (int idx = 0; idx < partitionKeyJson.length(); idx++) {
        char current = partitionKeyJson.charAt(idx);
        if (current > 127) {
            if (escaped == null) {
                // First non-ASCII character: lazily allocate and copy the prefix.
                escaped = new StringBuilder(partitionKeyJson.length());
                escaped.append(partitionKeyJson, 0, idx);
            }
            escaped.append(String.format("\\u%04X", (int) current));
        } else if (escaped != null) {
            escaped.append(current);
        }
    }
    return escaped == null ? partitionKeyJson : escaped.toString();
}
/**
 * Copies the readable bytes of the buffer into a new array.
 * Note: advances the buffer's reader index by {@code readableBytes()}.
 */
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
/**
 * Serializes the ObjectNode to a JSON string.
 *
 * @throws IllegalStateException wrapping any JsonProcessingException
 */
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
/**
 * Returns the configured max integrated-cache staleness in milliseconds,
 * validating that the Duration is non-negative and has at least millisecond
 * granularity.
 *
 * @throws IllegalArgumentException for a positive sub-millisecond or negative duration
 */
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
// Positive nanos with zero millis means a value below 1 ms was configured.
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} |
shading will also update all strings - not just in import and package statements - so it will automatically shade Blackbird as well. | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath to for maximum Jackson performance.");
}
} | loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Validates {@code value}; throws an IllegalArgumentException whose message
 * combines {@code argumentName} with the formatted message template.
 *
 * @param value                 the condition that must hold
 * @param argumentName          the argument being validated
 * @param messageTemplate       a {@link String#format} template for the failure message
 * @param messageTemplateParams arguments for the template
 * @throws IllegalArgumentException when {@code value} is false
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: previously passed argumentName in place of messageTemplate, so the
    // caller's template (and its parameters) never appeared in the exception message.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
    /**
     * URL-decodes {@code inputString} using UTF-8. Null/empty input — and input that fails
     * to decode — is returned unchanged (the failure is logged rather than propagated).
     */
    public static String decodeAsUTF8String(String inputString) {
        if (inputString == null || inputString.isEmpty()) {
            return inputString;
        }
        try {
            return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
        } catch (UnsupportedEncodingException e) {
            // Unreachable in practice: UTF-8 is a guaranteed charset on every JVM.
            logger.warn("Error while decoding input string", e);
            return inputString;
        }
    }
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
    /**
     * Returns true when {@code desiredConsistency} is equal to or weaker than the account's
     * {@code backendConsistency}. Ordering implemented here: STRONG accepts everything;
     * BOUNDED_STALENESS accepts everything but STRONG; SESSION/EVENTUAL/CONSISTENT_PREFIX
     * accept only SESSION, EVENTUAL and CONSISTENT_PREFIX.
     *
     * @throws IllegalArgumentException for an unrecognized backend consistency level.
     */
    public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
                                                ConsistencyLevel desiredConsistency) {
        switch (backendConsistency) {
            case STRONG:
                return desiredConsistency == ConsistencyLevel.STRONG ||
                    desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
                    desiredConsistency == ConsistencyLevel.SESSION ||
                    desiredConsistency == ConsistencyLevel.EVENTUAL ||
                    desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
            case BOUNDED_STALENESS:
                return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
                    desiredConsistency == ConsistencyLevel.SESSION ||
                    desiredConsistency == ConsistencyLevel.EVENTUAL ||
                    desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
            case SESSION:
            case EVENTUAL:
            case CONSISTENT_PREFIX:
                return desiredConsistency == ConsistencyLevel.SESSION ||
                    desiredConsistency == ConsistencyLevel.EVENTUAL ||
                    desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
            default:
                throw new IllegalArgumentException("backendConsistency");
        }
    }
    /** Returns the default user agent built from the SDK name and version constants. */
    public static String getUserAgent() {
        return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
    }
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
    /** Returns the shared lenient mapper (selected via {@link #configureSimpleObjectMapper}). */
    public static ObjectMapper getSimpleObjectMapper() {
        return Utils.simpleObjectMapper;
    }
    /** Returns the mapper that writes {@code Duration}/{@code Instant} via toString (ISO-8601). */
    public static ObjectMapper getDurationEnabledObjectMapper() {
        return durationEnabledObjectMapper;
    }
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
    /** Returns a time-based (version 1) UUID from the shared generator rather than a random one. */
    public static UUID randomUUID() {
        return TIME_BASED_GENERATOR.generate();
    }
    /** Formats {@code instant} as an RFC 1123 timestamp in GMT. */
    public static String instantAsUTCRFC1123(Instant instant){
        return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
    }
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams);
if (t != null) {
throw t;
}
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
    /**
     * Mutable single-value holder used to emulate C#-style out-parameters
     * (see {@code tryGetValue} / {@code tryRemove}).
     */
    public static class ValueHolder<V> {
        public ValueHolder() {
        }
        public ValueHolder(V v) {
            this.v = v;
        }
        // The held value; intentionally public for out-parameter style access.
        public V v;
        public static <T> ValueHolder<T> initialize(T v) {
            return new ValueHolder<>(v);
        }
    }
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
    /**
     * Deserializes a JSON string into {@code itemClassType} with the shared mapper.
     * Returns null for null/empty input.
     *
     * @throws IllegalStateException when the payload cannot be parsed.
     */
    public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
        if (StringUtils.isEmpty(itemResponseBodyAsString)) {
            return null;
        }
        try {
            return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
        } catch (IOException e) {
            throw new IllegalStateException(
                String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
        }
    }
    /**
     * Parses a JSON string into an {@link ObjectNode}. Returns null for null/empty input.
     *
     * @throws IllegalStateException when parsing fails.
     */
    public static ObjectNode parseJson(String itemResponseBodyAsString) {
        if (StringUtils.isEmpty(itemResponseBodyAsString)) {
            return null;
        }
        try {
            // Root must be a JSON object; a non-object root fails the cast with ClassCastException.
            return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
        } catch (IOException e) {
            throw new IllegalStateException(
                String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
        }
    }
    /**
     * Deserializes a JSON byte array into {@code itemClassType} with the shared mapper.
     * Returns null for a null/empty array.
     *
     * @throws IllegalStateException when the payload cannot be parsed.
     */
    public static <T> T parse(byte[] item, Class<T> itemClassType) {
        if (Utils.isEmpty(item)) {
            return null;
        }
        try {
            return getSimpleObjectMapper().readValue(item, itemClassType);
        } catch (IOException e) {
            throw new IllegalStateException(
                String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
        }
    }
    /**
     * Converts an already-parsed {@link JsonNode} into {@code itemClassType}, using the
     * supplied deserializer or a default Jackson-based one when {@code itemDeserializer} is null.
     */
    public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
        ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
            new ItemDeserializer.JsonDeserializer() : itemDeserializer;
        return effectiveDeserializer.convert(itemClassType, jsonNode);
    }
    /**
     * Serializes {@code object} to JSON into a {@link ByteBuffer}, starting from a 1 KB buffer.
     *
     * @throws IllegalArgumentException when serialization fails.
     */
    public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
        try {
            ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
            objectMapper.writeValue(byteBufferOutputStream, object);
            return byteBufferOutputStream.asByteBuffer();
        } catch (IOException e) {
            throw new IllegalArgumentException("Failed to serialize the object into json", e);
        }
    }
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
    /**
     * Merges {@code pagedFluxOptions} into the (required) change-feed request options via
     * {@link ModelBridgeInternal}.
     *
     * @throws NullPointerException when {@code cosmosChangeFeedRequestRequestOptions} is null.
     */
    public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
        CosmosPagedFluxOptions pagedFluxOptions,
        CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
        checkNotNull(
            cosmosChangeFeedRequestRequestOptions,
            "Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
        return ModelBridgeInternal
            .getEffectiveChangeFeedRequestOptions(
                cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
    }
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
    /** Drains all readable bytes of {@code buf} into a new array (advances the reader index). */
    public static byte[] toByteArray(ByteBuf buf) {
        byte[] bytes = new byte[buf.readableBytes()];
        buf.readBytes(bytes);
        return bytes;
    }
    /**
     * Serializes {@code object} to a JSON string with the supplied mapper.
     *
     * @throws IllegalStateException when Jackson fails to write the node.
     */
    public static String toJson(ObjectMapper mapper, ObjectNode object) {
        try {
            return mapper.writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException("Unable to convert JSON to STRING", e);
        }
    }
    /**
     * Validates and returns the configured max integrated cache staleness in milliseconds.
     *
     * @throws IllegalArgumentException when the duration is negative or has sub-millisecond
     * granularity (non-zero nanos but zero millis).
     */
    public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
        Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
        if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
            throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
        }
        if (maxIntegratedCacheStaleness.toMillis() < 0) {
            throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
        }
        return maxIntegratedCacheStaleness.toMillis();
    }
} |
Gotcha — I missed the fact that the simple mapper reference will also get updated. Thanks.
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath to for maximum Jackson performance.");
}
} | loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams);
if (t != null) {
throw t;
}
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
/**
 * Merges paged-flux paging state into the caller-supplied change feed request options
 * and returns the effective options instance used for the request.
 *
 * @param pagedFluxOptions per-iteration paging options.
 * @param cosmosChangeFeedRequestRequestOptions caller-supplied options; must not be null
 *        (assuming Guava-style {@code checkNotNull}, a null triggers a NullPointerException
 *        — confirm which checkNotNull is statically imported).
 * @return the effective change feed request options.
 */
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
        CosmosPagedFluxOptions pagedFluxOptions,
        CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
        checkNotNull(
            cosmosChangeFeedRequestRequestOptions,
            "Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
        return ModelBridgeInternal
            .getEffectiveChangeFeedRequestOptions(
                cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
    }
/**
 * Escapes every character above US-ASCII (> 0x7F) as a {@code \\uXXXX} sequence.
 * Returns the original string instance when no escaping was necessary (the common case),
 * allocating a builder only on the first non-ASCII character.
 *
 * @param partitionKeyJson the text to escape; must not be null.
 * @return the escaped text, or the input itself when already pure ASCII.
 */
static String escapeNonAscii(String partitionKeyJson) {
    StringBuilder escaped = null;
    for (int i = 0; i < partitionKeyJson.length(); i++) {
        final char c = partitionKeyJson.charAt(i);
        if (c <= 127) {
            if (escaped != null) {
                escaped.append(c);
            }
            continue;
        }
        if (escaped == null) {
            // First non-ASCII char: copy the ASCII prefix verbatim, then start escaping.
            escaped = new StringBuilder(partitionKeyJson.length());
            escaped.append(partitionKeyJson, 0, i);
        }
        escaped.append("\\u").append(String.format("%04X", (int) c));
    }
    return escaped == null ? partitionKeyJson : escaped.toString();
}
/**
 * Copies all readable bytes of the given Netty buffer into a fresh array.
 * Note: this consumes the buffer — the reader index advances to the end.
 *
 * @param buf source buffer.
 * @return a new array holding exactly the readable bytes.
 */
public static byte[] toByteArray(ByteBuf buf) {
    final int readable = buf.readableBytes();
    final byte[] bytes = new byte[readable];
    buf.readBytes(bytes);
    return bytes;
}
/**
 * Serializes the given {@code ObjectNode} to its JSON string form.
 *
 * @param mapper mapper used for serialization.
 * @param object node to serialize.
 * @return the JSON text.
 * @throws IllegalStateException wrapping any Jackson serialization failure.
 */
public static String toJson(ObjectMapper mapper, ObjectNode object) {
    final String json;
    try {
        json = mapper.writeValueAsString(object);
    } catch (JsonProcessingException e) {
        throw new IllegalStateException("Unable to convert JSON to STRING", e);
    }
    return json;
}
/**
 * Returns the configured max integrated cache staleness in whole milliseconds.
 *
 * @param dedicatedGatewayRequestOptions options carrying the staleness duration;
 *        assumes the duration was set (non-null) — TODO confirm callers guarantee this.
 * @return the staleness in milliseconds.
 * @throws IllegalArgumentException when the duration is positive but below 1 ms
 *         (sub-millisecond granularity unsupported) or is at least 1 ms negative.
 */
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
        Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
        // Positive but sub-millisecond durations would silently truncate to 0 ms — reject them.
        if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
            throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
        }
        if (maxIntegratedCacheStaleness.toMillis() < 0) {
            throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
        }
        // NOTE(review): a negative sub-millisecond duration (e.g. -1 ns) passes both checks
        // and returns 0 — confirm whether that is intended.
        return maxIntegratedCacheStaleness.toMillis();
    }
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
    /**
     * Tries to load and register an optional Jackson module by reflection.
     *
     * @param className fully-qualified module class name (e.g. Afterburner or Blackbird).
     * @param objectMapper mapper to register the module on.
     * @return {@code true} when the module was found and registered; {@code false} otherwise.
     */
    private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
        try {
            Class<?> clazz = Class.forName(className);
            Module module = (Module)clazz.getDeclaredConstructor().newInstance();
            objectMapper.registerModule(module);
            return true;
        } catch (ClassNotFoundException e) {
            // Module not on the classpath — expected for optional dependencies; the caller
            // falls back to the next candidate, so this is deliberately swallowed.
        } catch (Exception e) {
            logger.warn("Issues loading Jackson performance module " + className, e);
        }
        return false;
    }
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
    /**
     * Returns the shared Jackson mapper; duplicate-property strictness depends on the
     * last call to {@code configureSimpleObjectMapper}.
     */
    public static ObjectMapper getSimpleObjectMapper() {
        return Utils.simpleObjectMapper;
    }
    /** Returns the mapper that serializes {@code Duration}/{@code Instant} via toString(). */
    public static ObjectMapper getDurationEnabledObjectMapper() {
        return durationEnabledObjectMapper;
    }
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, argumentName, messageTemplateParams);
if (t != null) {
throw t;
}
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} |
I don't think afterburner is being shaded at all... If it was shaded, I wouldn't need to add it to the classpath for the azure sdk to function. Am I missing something? | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
            logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
boolean loaded = false;
if (JAVA_VERSION != -1) {
if (JAVA_VERSION >= 11) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
}
if (!loaded && JAVA_VERSION < 16) {
loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
}
}
if (!loaded) {
logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
/**
 * Formats the given instant, in GMT, using the RFC 1123 pattern.
 * Throws NullPointerException if {@code instant} is null.
 */
public static String instantAsUTCRFC1123(Instant instant){
    return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
/**
 * Unboxes {@code val}, substituting {@code defaultValue} when it is null.
 */
public static int getValueOrDefault(Integer val, int defaultValue) {
    if (val == null) {
        return defaultValue;
    }
    return val;
}
/**
 * Throws {@link IllegalArgumentException} with the given message when
 * {@code value} is false; no-op otherwise.
 */
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
    if (t != null) {
        throw t;
    }
}
/**
 * Throws {@link NullPointerException} with the given message when
 * {@code val} is null; no-op otherwise.
 */
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
    NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
    if (t != null) {
        throw t;
    }
}
/**
 * Throws {@link IllegalArgumentException} built from the formatted
 * {@code messageTemplate} when {@code value} is false; no-op otherwise.
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: 'argumentName' was previously passed in the messageTemplate
    // position, so the caller's template and parameters were silently dropped
    // and the exception message contained the argument name twice.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
/**
 * Returns null when {@code value} is true, otherwise an
 * {@link IllegalArgumentException} describing the failed argument check.
 */
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
    if (value) {
        return null;
    }
    return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
/**
 * Returns null when {@code value} is true, otherwise an
 * {@link IllegalArgumentException} whose message is built by formatting
 * {@code messageTemplate} with {@code messageTemplateParams}.
 */
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (value) {
        return null;
    }
    return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
/**
 * Returns null when {@code val} is non-null, otherwise a
 * {@link NullPointerException} whose message is built by formatting
 * {@code messageTemplate} with {@code messageTemplateParams}.
 */
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (val != null) {
        return null;
    }
    return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
/**
 * Returns null when {@code value} is true, otherwise a
 * {@link BadRequestException} (HTTP 400 semantics) whose message is built by
 * formatting {@code messageTemplate} with {@code messageTemplateParams}.
 */
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (value) {
        return null;
    }
    return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
/**
 * Safe-cast helper: returns {@code i} typed as {@code klass} when it is an
 * instance of that class, otherwise null (including when {@code i} is null).
 */
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
    // Class.isInstance(null) is false, so a null input falls through to null,
    // matching the original explicit null check.
    return klass.isInstance(i) ? (O) i : null;
}
/**
 * Returns the shared immutable empty list.
 */
public static <V> List<V> immutableListOf() {
    // Collections.emptyList() is the type-safe equivalent of the raw
    // Collections.EMPTY_LIST (same singleton instance) and removes the need
    // for the unchecked-cast suppression.
    return Collections.emptyList();
}
/**
 * Returns an immutable single-entry map of {@code k1 -> v1}.
 */
public static <K, V> Map<K, V> immutableMapOf(K k1, V v1) {
    // singletonMap is already unmodifiable and tolerates null key/value,
    // exactly like the previous unmodifiableMap-over-HashMap construction.
    return Collections.singletonMap(k1, v1);
}
/**
 * Returns the first element of {@code list}, or null when the list is empty.
 * Throws NullPointerException if {@code list} itself is null.
 */
public static <V> V firstOrDefault(List<V> list) {
    if (list.isEmpty()) {
        return null;
    }
    return list.get(0);
}
/**
 * Mutable single-value holder used as a C#-style "out" parameter
 * (see {@code tryGetValue} / {@code tryRemove}).
 */
public static class ValueHolder<V> {
    public ValueHolder() {
    }
    public ValueHolder(V v) {
        this.v = v;
    }
    // The held value; intentionally a public mutable field.
    public V v;
    /** Convenience factory: a holder pre-populated with {@code v}. */
    public static <T> ValueHolder<T> initialize(T v) {
        return new ValueHolder<>(v);
    }
}
/**
 * Looks up {@code key}, storing the result in {@code holder} and returning
 * whether a value was found. NOTE: a key mapped to a null value is
 * indistinguishable from an absent key (both return false).
 */
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
    holder.v = dictionary.get(key);
    return holder.v != null;
}
/**
 * Removes {@code key}, storing the removed value in {@code holder} and
 * returning whether anything was removed. As with {@code tryGetValue},
 * a key mapped to null reads as "not present".
 */
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
    holder.v = dictionary.remove(key);
    return holder.v != null;
}
/**
 * Deserializes a JSON string into an instance of {@code itemClassType}.
 *
 * @return the deserialized POJO, or null when the input is null/empty.
 * @throws IllegalStateException if the payload cannot be parsed.
 */
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
    if (StringUtils.isEmpty(itemResponseBodyAsString)) {
        return null;
    }
    try {
        return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
    } catch (IOException e) {
        // NOTE: the full payload ends up in the exception message.
        throw new IllegalStateException(
            String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
    }
}
/**
 * Parses a JSON string into an {@link ObjectNode}.
 *
 * @return the parsed node, or null when the input is null/empty.
 * @throws IllegalStateException if the payload is not valid JSON.
 * Note: the unchecked cast throws ClassCastException when the root of the
 * payload is not a JSON object (e.g. an array or scalar).
 */
public static ObjectNode parseJson(String itemResponseBodyAsString) {
    if (StringUtils.isEmpty(itemResponseBodyAsString)) {
        return null;
    }
    try {
        return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
    } catch (IOException e) {
        throw new IllegalStateException(
            String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
    }
}
/**
 * Deserializes a JSON byte array into an instance of {@code itemClassType}.
 *
 * @return the deserialized POJO, or null when the array is null/empty.
 * @throws IllegalStateException if the payload cannot be parsed; the bytes
 *         are decoded as UTF-8 for the error message only.
 */
public static <T> T parse(byte[] item, Class<T> itemClassType) {
    if (Utils.isEmpty(item)) {
        return null;
    }
    try {
        return getSimpleObjectMapper().readValue(item, itemClassType);
    } catch (IOException e) {
        throw new IllegalStateException(
            String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
    }
}
/**
 * Converts a {@link JsonNode} into {@code itemClassType} via the supplied
 * deserializer, falling back to the default JSON deserializer when
 * {@code itemDeserializer} is null.
 */
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
    ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
        new ItemDeserializer.JsonDeserializer() : itemDeserializer;
    return effectiveDeserializer.convert(itemClassType, jsonNode);
}
/**
 * Serializes {@code object} to JSON and returns it as a {@link ByteBuffer}.
 * The backing stream starts at 1 KB (ONE_KB) and grows as needed.
 *
 * @throws IllegalArgumentException if serialization fails.
 */
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
    try {
        ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
        objectMapper.writeValue(byteBufferOutputStream, object);
        return byteBufferOutputStream.asByteBuffer();
    } catch (IOException e) {
        throw new IllegalArgumentException("Failed to serialize the object into json", e);
    }
}
/**
 * Returns true when {@code bytes} is null or has zero length.
 */
public static boolean isEmpty(byte[] bytes) {
    if (bytes == null) {
        return true;
    }
    return bytes.length == 0;
}
/**
 * Merges the paged-flux options into the change-feed request options via
 * {@link ModelBridgeInternal} and returns the effective options.
 *
 * @throws NullPointerException if {@code cosmosChangeFeedRequestRequestOptions} is null.
 */
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
    CosmosPagedFluxOptions pagedFluxOptions,
    CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
    checkNotNull(
        cosmosChangeFeedRequestRequestOptions,
        "Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
    return ModelBridgeInternal
        .getEffectiveChangeFeedRequestOptions(
            cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
/**
 * Replaces every character above U+007F with its uppercase {@code \\uXXXX}
 * escape, leaving ASCII untouched. Returns the input string itself (same
 * reference) when it is pure ASCII, allocating only when escaping is needed.
 */
static String escapeNonAscii(String partitionKeyJson) {
    StringBuilder escaped = null;
    int length = partitionKeyJson.length();
    for (int idx = 0; idx < length; idx++) {
        char c = partitionKeyJson.charAt(idx);
        if (c <= 127) {
            // ASCII: copy only once we have started building an escaped result.
            if (escaped != null) {
                escaped.append(c);
            }
            continue;
        }
        if (escaped == null) {
            // First non-ASCII character: lazily materialize the builder with
            // the ASCII prefix seen so far.
            escaped = new StringBuilder(length);
            escaped.append(partitionKeyJson, 0, idx);
        }
        escaped.append(String.format("\\u%04X", (int) c));
    }
    return escaped == null ? partitionKeyJson : escaped.toString();
}
/**
 * Copies all readable bytes out of {@code buf} into a fresh array.
 * Note: readBytes advances the buffer's reader index, so this consumes
 * the buffer's readable content.
 */
public static byte[] toByteArray(ByteBuf buf) {
    byte[] bytes = new byte[buf.readableBytes()];
    buf.readBytes(bytes);
    return bytes;
}
/**
 * Serializes an {@link ObjectNode} to its JSON string form.
 *
 * @throws IllegalStateException if serialization fails.
 */
public static String toJson(ObjectMapper mapper, ObjectNode object) {
    try {
        return mapper.writeValueAsString(object);
    } catch (JsonProcessingException e) {
        throw new IllegalStateException("Unable to convert JSON to STRING", e);
    }
}
/**
 * Validates and returns the max integrated cache staleness in milliseconds.
 *
 * @throws IllegalArgumentException if the duration is positive but below one
 *         millisecond (sub-millisecond granularity unsupported) or negative.
 */
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
    Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
    // A positive duration that truncates to 0 ms means the caller supplied
    // sub-millisecond precision, which the backend cannot honor.
    if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
        throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
    }
    if (maxIntegratedCacheStaleness.toMillis() < 0) {
        throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
    }
    return maxIntegratedCacheStaleness.toMillis();
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
// Major Java version (8, 11, 17, ...) or -1 when it could not be determined.
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
// Shared, thread-safe Base64 codecs (basic and URL-safe variants).
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
// Two pre-built mappers differing only in duplicate-property strictness;
// configureSimpleObjectMapper switches 'simpleObjectMapper' between them.
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
    createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
    createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
// Intentionally non-final: reassigned by configureSimpleObjectMapper.
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
// Time-based (version 1) UUID generator backing randomUUID().
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
    Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
// Custom RFC 1123 pattern; always cached — DateTimeFormatter is thread-safe.
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
/**
 * Builds the shared lenient JSON mapper: unknown properties ignored, single
 * quotes and trailing commas tolerated, floats never silently truncated to
 * ints, with optional strict duplicate-property detection.
 */
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
    objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
    if (!allowDuplicateProperties) {
        // Fail fast on payloads that repeat the same property name.
        objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
    }
    objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
    // Optionally registers Blackbird/Afterburner for faster (de)serialization.
    tryToLoadJacksonPerformanceLibrary(objectMapper);
    objectMapper.registerModule(new JavaTimeModule());
    return objectMapper;
}
/**
 * Reflectively instantiates and registers the Jackson module named by
 * {@code className}, returning whether registration succeeded.
 */
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
    try {
        Class<?> clazz = Class.forName(className);
        Module module = (Module)clazz.getDeclaredConstructor().newInstance();
        objectMapper.registerModule(module);
        return true;
    } catch (ClassNotFoundException e) {
        // Deliberately silent: the module is an optional dependency and its
        // absence is an expected, non-error condition.
    } catch (Exception e) {
        logger.warn("Issues loading Jackson performance module " + className, e);
    }
    return false;
}
/**
 * Builds a mapper that serializes {@link Duration} and {@link Instant} using
 * their {@code toString()} representations (ISO-8601 forms).
 */
private static ObjectMapper createAndInitializeDurationObjectMapper() {
    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.registerModule(new SimpleModule()
        .addSerializer(Duration.class, ToStringSerializer.instance)
        .addSerializer(Instant.class, ToStringSerializer.instance));
    return objectMapper;
}
/**
 * Parses the major Java version from the "java.version" system property,
 * handling both legacy "1.8.x" (returns 8) and modern "11.0.x" (returns 11)
 * formats. Returns -1 when the property cannot be parsed.
 */
private static int getJavaVersion() {
    int version = -1;
    try {
        String completeJavaVersion = System.getProperty("java.version");
        String[] versionElements = completeJavaVersion.split("\\.");
        int versionFirstPart = Integer.parseInt(versionElements[0]);
        // Legacy scheme "1.x" puts the major version in the second element.
        if (versionFirstPart == 1) {
            version = Integer.parseInt(versionElements[1]);
        } else {
            version = versionFirstPart;
        }
        return version;
    } catch (Exception ex) {
        logger.warn("Error while fetching java version", ex);
        return version;
    }
}
/**
 * Encodes {@code str} as UTF-8 and wraps the bytes in an unpooled Netty
 * {@link ByteBuf} (wrappedBuffer does not copy the array). Returns null for
 * null input.
 */
public static ByteBuf getUTF8BytesOrNull(String str) {
    if (str == null) {
        return null;
    }
    return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
/**
 * Encodes {@code str} as UTF-8 bytes. Throws NullPointerException for null input.
 */
public static byte[] getUTF8Bytes(String str) {
    return str.getBytes(StandardCharsets.UTF_8);
}
/**
 * Base64-encodes {@code binaryData} using the basic (non-MIME) encoder.
 * NOTE(review): Base64.getEncoder() never emits line separators, so the
 * trailing "\r\n" strip below appears to be defensive dead code kept for
 * safety — confirm before removing.
 */
public static String encodeBase64String(byte[] binaryData) {
    String encodedString = Base64Encoder.encodeToString(binaryData);
    if (encodedString.endsWith("\r\n")) {
        encodedString = encodedString.substring(0, encodedString.length() - 2);
    }
    return encodedString;
}
/**
 * Base64-decodes {@code encodedString} and interprets the bytes as UTF-8 text.
 *
 * @throws IllegalArgumentException if the input is not valid Base64.
 */
public static String decodeBase64String(String encodedString) {
    byte[] decodeString = Base64Decoder.decode(encodedString);
    return new String(decodeString, StandardCharsets.UTF_8);
}
/**
 * URL-decodes {@code inputString} as UTF-8 ("%xx" escapes and '+' for space).
 * Null/empty input and decode failures return the input unchanged.
 */
public static String decodeAsUTF8String(String inputString) {
    if (inputString == null || inputString.isEmpty()) {
        return inputString;
    }
    try {
        return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
    } catch (UnsupportedEncodingException e) {
        // UTF-8 is always supported, so this path is effectively unreachable;
        // fall back to the raw input just in case.
        logger.warn("Error while decoding input string", e);
        return inputString;
    }
}
/**
 * Encodes {@code binaryData} as unpadded URL-safe Base64 ('-' and '_' in
 * place of '+' and '/'). The trailing "\r\n" strip mirrors
 * {@code encodeBase64String} and is defensive — the URL encoder emits no
 * line separators.
 */
public static String encodeUrlBase64String(byte[] binaryData) {
    String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
    if (encodedString.endsWith("\r\n")) {
        encodedString = encodedString.substring(0, encodedString.length() - 2);
    }
    return encodedString;
}
/**
 * Switches the shared mapper returned by {@code getSimpleObjectMapper}
 * between the duplicate-property-tolerant and strict pre-built instances.
 * Affects all subsequent callers process-wide.
 */
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
    if (allowDuplicateProperties) {
        Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
    } else {
        Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
    }
}
/**
 * Joins the specified paths by appropriately padding them with '/'.
 * The result always starts and ends with '/', e.g.
 * joinPath("dbs", "colls") returns "/dbs/colls/".
 *
 * @param path1 the first path segment to join.
 * @param path2 the second path segment to join; ignored when null/empty.
 * @return the concatenated path with '/'
 */
public static String joinPath(String path1, String path2) {
    path1 = trimBeginningAndEndingSlashes(path1);
    String result = "/" + path1 + "/";
    if (!StringUtils.isEmpty(path2)) {
        path2 = trimBeginningAndEndingSlashes(path2);
        result += path2 + "/";
    }
    return result;
}
/**
 * Removes at most one leading and one trailing '/' from {@code path}.
 *
 * @param path the path to trim; may be null.
 * @return the trimmed path, or null when the input is null.
 */
public static String trimBeginningAndEndingSlashes(String path) {
    if (path == null) {
        return null;
    }
    String trimmed = path.startsWith("/") ? path.substring(1) : path;
    return trimmed.endsWith("/")
        ? trimmed.substring(0, trimmed.length() - 1)
        : trimmed;
}
/**
 * Builds a query string from name/value pairs; null input yields "".
 * Entries with a null/empty key are skipped; a null value emits the key with
 * no value separator. Values are NOT URL-encoded here.
 * NOTE(review): Separators.Query[1] / Query[2] are presumably '&amp;' and '=' —
 * confirm against RuntimeConstants.
 */
public static String createQuery(Map<String, String> queryParameters) {
    if (queryParameters == null)
        return "";
    StringBuilder queryString = new StringBuilder();
    for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
        String key = nameValuePair.getKey();
        String value = nameValuePair.getValue();
        if (key != null && !key.isEmpty()) {
            if (queryString.length() > 0) {
                // Pair separator between entries.
                queryString.append(RuntimeConstants.Separators.Query[1]);
            }
            queryString.append(key);
            if (value != null) {
                // Key/value separator.
                queryString.append(RuntimeConstants.Separators.Query[2]);
                queryString.append(value);
            }
        }
    }
    return queryString.toString();
}
/**
 * Appends {@code query} (leading '?' stripped if present) to
 * {@code urlString} after ensuring it ends with '/'.
 *
 * @throws IllegalStateException if {@code urlString} is null or the
 *         resulting URI is syntactically invalid.
 */
public static URI setQuery(String urlString, String query) {
    if (urlString == null)
        throw new IllegalStateException("urlString parameter can't be null.");
    query = Utils.removeLeadingQuestionMark(query);
    try {
        if (query != null && !query.isEmpty()) {
            return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
        } else {
            return new URI(Utils.addTrailingSlash(urlString));
        }
    } catch (URISyntaxException e) {
        throw new IllegalStateException("Uri is invalid: ", e);
    }
}
/**
 * Given the full path to a resource, extract the collection path.
 * After trimming surrounding slashes, the result is the prefix up to (but not
 * including) the fourth '/', i.e. the first four path segments. Paths with
 * fewer than four slashes (and null input) are returned unchanged (trimmed).
 *
 * @param resourceFullName the full path to the resource.
 * @return the path of the collection in which the resource is.
 */
public static String getCollectionName(String resourceFullName) {
    if (resourceFullName != null) {
        resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
        int slashCount = 0;
        for (int i = 0; i < resourceFullName.length(); i++) {
            if (resourceFullName.charAt(i) == '/') {
                slashCount++;
                // Truncate at the fourth slash: everything before it is the
                // collection portion of the resource path.
                if (slashCount == 4) {
                    return resourceFullName.substring(0, i);
                }
            }
        }
    }
    return resourceFullName;
}
/**
 * Null-safe size: returns 0 for a null collection.
 */
public static <T> int getCollectionSize(Collection<T> collection) {
    if (collection == null) {
        return 0;
    }
    return collection.size();
}
/**
 * Returns whether {@code type} is a resource nested under a collection
 * (documents, attachments, conflicts, stored procedures, triggers, UDFs).
 */
public static boolean isCollectionChild(ResourceType type) {
    return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
        || type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
/**
 * Returns whether {@code operationType} mutates state: Create, Upsert,
 * Delete, Replace, ExecuteJavaScript, or Batch.
 */
public static boolean isWriteOperation(OperationType operationType) {
    return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
        || operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
/**
 * Ensures {@code path} ends with the URL separator; null/empty input becomes
 * the separator itself (Separators.Url is a char array; its first element is
 * the separator character).
 */
private static String addTrailingSlash(String path) {
    if (path == null || path.isEmpty())
        path = new String(RuntimeConstants.Separators.Url);
    else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
        path = path + RuntimeConstants.Separators.Url[0];
    return path;
}
/**
 * Strips a single leading query-separator character (Separators.Query[0],
 * presumably '?') from {@code path}; null/empty input is returned unchanged.
 */
private static String removeLeadingQuestionMark(String path) {
    if (path == null || path.isEmpty())
        return path;
    if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
        return path.substring(1);
    return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Throws {@link IllegalArgumentException} built from the formatted
 * {@code messageTemplate} when {@code value} is false; no-op otherwise.
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: 'argumentName' was previously passed in the messageTemplate
    // position, so the caller's template and parameters were silently dropped.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
/**
 * Returns the shared immutable empty list.
 */
public static <V> List<V> immutableListOf() {
    // Collections.emptyList() is the type-safe equivalent of the raw
    // Collections.EMPTY_LIST (same singleton) and removes the need for the
    // unchecked-cast suppression.
    return Collections.emptyList();
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} |
Sorry — misunderstanding on my part. azure-cosmos itself is not shading it. But the Spark connector (azure-cosmos-spark-*) does shade its dependencies (azure-core, azure-cosmos, netty, reactor, jackson, etc.). Annie's concerns were about validating whether this change would cause issues in the Spark connector or in the Kafka connector (which also uses shading, similar to Spark). | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
// Try Blackbird first on JDK 11+, then fall back to Afterburner when the
// runtime is below JDK 16 (Afterburner is skipped on 16+); both modules are
// optional performance accelerators.
boolean loaded = false;
if (JAVA_VERSION != -1) {
    if (JAVA_VERSION >= 11) {
        loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
    }
    if (!loaded && JAVA_VERSION < 16) {
        loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
    }
}
if (!loaded) {
    // BUG FIX: corrected grammar in the warning ("classpath to for" -> "classpath for"),
    // matching the corrected wording used elsewhere in this file.
    logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper); | private static void tryToLoadJacksonPerformanceLibrary(ObjectMapper objectMapper) {
// Try Blackbird first on JDK 11+, then fall back to Afterburner when the
// runtime is below JDK 16; both modules are optional performance accelerators
// (loadModuleIfFound silently tolerates their absence).
boolean loaded = false;
if (JAVA_VERSION != -1) {
    if (JAVA_VERSION >= 11) {
        loaded = loadModuleIfFound("com.fasterxml.jackson.module.blackbird.BlackbirdModule", objectMapper);
    }
    if (!loaded && JAVA_VERSION < 16) {
        loaded = loadModuleIfFound("com.fasterxml.jackson.module.afterburner.AfterburnerModule", objectMapper);
    }
}
if (!loaded) {
    logger.warn("Neither Afterburner nor Blackbird Jackson module loaded. Consider adding one to your classpath for maximum Jackson performance.");
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
 * Joins the specified paths by appropriately padding them with '/'
 *
 * @param path1 the first path segment to join.
 * @param path2 the second path segment to join.
 * @return the concatenated path with '/'
 */
public static String joinPath(String path1, String path2) {
    StringBuilder joined = new StringBuilder("/")
        .append(trimBeginningAndEndingSlashes(path1))
        .append('/');
    if (!StringUtils.isEmpty(path2)) {
        joined.append(trimBeginningAndEndingSlashes(path2)).append('/');
    }
    return joined.toString();
}
/**
 * Trims the beginning and ending '/' from the given path
 *
 * @param path the path to trim for beginning and ending slashes
 * @return the path without beginning and ending '/'
 */
public static String trimBeginningAndEndingSlashes(String path) {
    if (path == null) {
        return null;
    }
    int from = path.startsWith("/") ? 1 : 0;
    int to = path.length();
    // Only strip a trailing slash that was not already consumed as the leading one.
    if (to > from && path.charAt(to - 1) == '/') {
        to--;
    }
    return path.substring(from, to);
}
/**
 * Builds a query string from the given name/value pairs.
 *
 * <p>Entries with a null or empty key are skipped; a null value emits the key
 * alone. Separator characters come from {@code RuntimeConstants.Separators.Query}
 * (NOTE(review): presumably '?', '&amp;', '=' at indexes 0..2 — confirm).</p>
 */
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
// Separate subsequent pairs with the pair separator.
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
/**
 * Appends the given query to the URL string (after ensuring a trailing slash) and
 * returns the result as a URI.
 *
 * @param urlString the base URL; must not be null.
 * @param query the query to append; a leading '?' is stripped first. May be null/empty.
 * @return the combined URI.
 * @throws IllegalStateException when urlString is null or the result is not a valid URI.
 */
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
 * Given the full path to a resource, extract the collection path.
 *
 * @param resourceFullName the full path to the resource.
 * @return the path of the collection in which the resource is.
 */
public static String getCollectionName(String resourceFullName) {
    if (resourceFullName == null) {
        return null;
    }
    String trimmed = Utils.trimBeginningAndEndingSlashes(resourceFullName);
    // The collection path ends just before the fourth '/' of the trimmed path
    // (e.g. "dbs/{db}/colls/{coll}/docs/{doc}" -> "dbs/{db}/colls/{coll}").
    int slashIndex = -1;
    for (int found = 0; found < 4; found++) {
        slashIndex = trimmed.indexOf('/', slashIndex + 1);
        if (slashIndex < 0) {
            // Fewer than four segments: the path already is the collection (or shorter).
            return trimmed;
        }
    }
    return trimmed.substring(0, slashIndex);
}
/**
 * Returns the size of the given collection, treating {@code null} as empty.
 */
public static <T> int getCollectionSize(Collection<T> collection) {
    return collection == null ? 0 : collection.size();
}
/**
 * Returns true when the given resource type lives inside a collection
 * (documents, attachments, conflicts, and the scripting resources).
 */
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
/**
 * Returns true when the given operation mutates backend state
 * (create/upsert/delete/replace, stored-procedure execution, and batch).
 */
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
/**
 * Ensures the path ends with the URL separator ('/'); a null/empty path becomes
 * the separator itself.
 */
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
/**
 * Strips a single leading query separator ('?') from the path, if present.
 */
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
/**
 * Returns true when a client may request {@code desiredConsistency} against an
 * account whose default is {@code backendConsistency}.
 *
 * <p>A client can only weaken, never strengthen, the account-level consistency:
 * STRONG permits every level; BOUNDED_STALENESS everything but STRONG; the three
 * weakest levels (SESSION / EVENTUAL / CONSISTENT_PREFIX) permit only each other.</p>
 *
 * @throws IllegalArgumentException for an unrecognized backend consistency level.
 */
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
/**
 * Builds the default user-agent string from the SDK name/version constants.
 */
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
/**
 * Builds a user-agent string of the form
 * "{prefix}{sdkName}/{sdkVersion} {osName}/{osVersion} JRE/{javaVersion}".
 * Whitespace is removed from the OS name so it cannot break the header format.
 */
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
/**
 * Returns the shared general-purpose ObjectMapper; see configureSimpleObjectMapper
 * for how duplicate-property handling is selected.
 */
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
/**
 * Returns the mapper that serializes Duration/Instant via toString().
 */
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
 * Returns Current Time in RFC 1123 format, e.g,
 * Fri, 01 Dec 2017 19:22:30 GMT.
 *
 * @return an instance of STRING
 */
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
/**
 * Generates a time-based (version 1) UUID via the shared generator.
 */
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
/**
 * Formats the given instant as RFC 1123 in the GMT zone.
 */
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
/**
 * Unboxes {@code val}, substituting {@code defaultValue} when it is {@code null}.
 */
public static int getValueOrDefault(Integer val, int defaultValue) {
    if (val == null) {
        return defaultValue;
    }
    return val;
}
/**
 * Throws IllegalArgumentException built from the argument name and message when
 * {@code value} is false.
 */
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Throws NullPointerException built from the argument name and message when
 * {@code val} is null.
 */
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Throws {@link IllegalArgumentException} when {@code value} is false, with a message
 * produced by formatting {@code messageTemplate} with {@code messageTemplateParams}.
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: argumentName was previously passed where messageTemplate belongs,
    // so the template (and its parameters) never reached the exception message.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
/**
 * Returns an {@link IllegalArgumentException} describing the failed check, or
 * {@code null} when {@code value} is true.
 */
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
    return value
        ? null
        : new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
/**
 * Returns an {@link IllegalArgumentException} whose message is built by formatting
 * {@code messageTemplate} with {@code messageTemplateParams}; {@code null} when the
 * check passed.
 */
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (value) {
        return null;
    }
    String formatted = String.format(messageTemplate, messageTemplateParams);
    return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, formatted));
}
/**
 * Returns a {@link NullPointerException} describing the failed null-check, or
 * {@code null} when {@code val} is non-null.
 */
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
    if (val != null) {
        return null;
    }
    String formatted = String.format(messageTemplate, messageTemplateParams);
    return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, formatted));
}
/**
 * Returns a BadRequestException with a formatted message when the check fails, or
 * null when {@code value} is true. Mirrors checkStateOrReturnException but maps the
 * failure onto the service's bad-request error type.
 */
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
/**
 * Safe cast: returns {@code i} as type {@code O} when it is an instance of
 * {@code klass}, otherwise {@code null} (also for a {@code null} input).
 */
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
    // Class.isInstance(null) is false, which also covers the null-input case.
    return klass.isInstance(i) ? (O) i : null;
}
/**
 * Returns the shared immutable empty list.
 */
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
    return (List<V>) Collections.EMPTY_LIST;
}
/**
 * Returns an unmodifiable single-entry map of {@code k1 -> v1}.
 */
public static <K, V> Map<K, V> immutableMapOf(K k1, V v1) {
    Map<K, V> singleEntry = new HashMap<>();
    singleEntry.put(k1, v1);
    return Collections.unmodifiableMap(singleEntry);
}
/**
 * Returns the first element of {@code list}, or {@code null} when it is empty.
 * NOTE(review): assumes {@code list} itself is non-null — TODO confirm callers.
 */
public static <V> V firstOrDefault(List<V> list) {
    return list.isEmpty() ? null : list.get(0);
}
/**
 * Mutable single-value holder used to emulate C#-style "out" parameters
 * (see tryGetValue / tryRemove below).
 */
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
// Intentionally public and mutable so callees can write the result directly.
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
/**
 * Looks up {@code key}, writing the mapped value into {@code holder}.
 * Returns true only for a non-null mapping; a key mapped to null reads as absent.
 */
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
/**
 * Removes {@code key}, writing the removed value into {@code holder}.
 * Returns true only when a non-null mapping was removed.
 */
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
/**
 * Deserializes the given JSON string into {@code itemClassType}; returns null for
 * a null/empty input.
 *
 * @throws IllegalStateException when the payload cannot be parsed.
 */
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
/**
 * Parses the given JSON string into an ObjectNode; returns null for a null/empty
 * input. NOTE(review): a non-object JSON root would surface as ClassCastException.
 *
 * @throws IllegalStateException when the payload cannot be parsed.
 */
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
/**
 * Deserializes the given JSON bytes into {@code itemClassType}; returns null for a
 * null/empty array.
 *
 * @throws IllegalStateException when the payload cannot be parsed.
 */
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
/**
 * Converts the given JsonNode via the supplied deserializer, defaulting to the
 * plain Jackson-based one when {@code itemDeserializer} is null.
 */
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
/**
 * Serializes {@code object} to JSON into a ByteBuffer, starting from a 1 KB stream.
 *
 * @throws IllegalArgumentException when serialization fails.
 */
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
/**
 * Returns true when the byte array is {@code null} or has no elements.
 */
public static boolean isEmpty(byte[] bytes) {
    if (bytes == null) {
        return true;
    }
    return bytes.length == 0;
}
/**
 * Derives the effective change-feed request options for the given paged-flux
 * options via the model bridge.
 *
 * @throws NullPointerException when the change-feed request options argument is null.
 */
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
/**
 * Replaces every character above U+007F with its uppercase {@code \\uXXXX} escape,
 * returning the original string unchanged (no allocation) when it is pure ASCII.
 */
static String escapeNonAscii(String partitionKeyJson) {
    StringBuilder escaped = null;
    for (int i = 0; i < partitionKeyJson.length(); i++) {
        char c = partitionKeyJson.charAt(i);
        if (c > 127) {
            if (escaped == null) {
                // First non-ASCII char: lazily allocate and copy the ASCII prefix.
                escaped = new StringBuilder(partitionKeyJson.length());
                escaped.append(partitionKeyJson, 0, i);
            }
            escaped.append(String.format("\\u%04X", (int) c));
        } else if (escaped != null) {
            escaped.append(c);
        }
    }
    return escaped == null ? partitionKeyJson : escaped.toString();
}
/**
 * Drains the readable bytes of the given Netty buffer into a new array,
 * advancing the buffer's reader index.
 */
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
/**
 * Serializes the given ObjectNode to a JSON string with the supplied mapper.
 *
 * @throws IllegalStateException when serialization fails.
 */
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
/**
 * Validates and returns the max integrated cache staleness in milliseconds.
 *
 * @throws IllegalArgumentException when the duration is negative, or positive but
 * below one millisecond (only millisecond granularity is supported).
 */
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} | class Utils {
private final static Logger logger = LoggerFactory.getLogger(Utils.class);
public static final Class<?> byteArrayClass = new byte[0].getClass();
private static final int JAVA_VERSION = getJavaVersion();
private static final int ONE_KB = 1024;
private static final ZoneId GMT_ZONE_ID = ZoneId.of("GMT");
public static final Base64.Encoder Base64Encoder = Base64.getEncoder();
public static final Base64.Decoder Base64Decoder = Base64.getDecoder();
public static final Base64.Encoder Base64UrlEncoder = Base64.getUrlEncoder();
private static final ObjectMapper simpleObjectMapperAllowingDuplicatedProperties =
createAndInitializeObjectMapper(true);
private static final ObjectMapper simpleObjectMapperDisallowingDuplicatedProperties =
createAndInitializeObjectMapper(false);
private static final ObjectMapper durationEnabledObjectMapper = createAndInitializeDurationObjectMapper();
private static ObjectMapper simpleObjectMapper = simpleObjectMapperDisallowingDuplicatedProperties;
private static final TimeBasedGenerator TIME_BASED_GENERATOR =
Generators.timeBasedGenerator(EthernetAddress.constructMulticastAddress());
private static final Pattern SPACE_PATTERN = Pattern.compile("\\s");
private static final DateTimeFormatter RFC_1123_DATE_TIME = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
/**
 * Builds the lenient JSON mapper used across the SDK: unknown properties are
 * ignored, single quotes and trailing commas are tolerated, and floats are never
 * silently truncated to ints. Duplicate-property detection is enabled only when
 * {@code allowDuplicateProperties} is false.
 */
private static ObjectMapper createAndInitializeObjectMapper(boolean allowDuplicateProperties) {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
objectMapper.configure(JsonParser.Feature.ALLOW_TRAILING_COMMA, true);
if (!allowDuplicateProperties) {
objectMapper.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
}
objectMapper.configure(DeserializationFeature.ACCEPT_FLOAT_AS_INT, false);
// NOTE(review): helper not visible here; presumably registers an optional Jackson
// performance module when present on the classpath (see loadModuleIfFound).
tryToLoadJacksonPerformanceLibrary(objectMapper);
objectMapper.registerModule(new JavaTimeModule());
return objectMapper;
}
/**
 * Registers the Jackson module with the given class name, if it is present on the
 * classpath. Returns true when the module was registered.
 */
private static boolean loadModuleIfFound(String className, ObjectMapper objectMapper) {
try {
Class<?> clazz = Class.forName(className);
Module module = (Module)clazz.getDeclaredConstructor().newInstance();
objectMapper.registerModule(module);
return true;
} catch (ClassNotFoundException e) {
// Intentionally ignored: the module is optional and its absence is the common
// case, so there is nothing worth logging here.
} catch (Exception e) {
logger.warn("Issues loading Jackson performance module " + className, e);
}
return false;
}
/**
 * Builds a mapper that serializes java.time Duration and Instant values via their
 * toString() representation.
 */
private static ObjectMapper createAndInitializeDurationObjectMapper() {
ObjectMapper objectMapper = new ObjectMapper();
objectMapper.registerModule(new SimpleModule()
.addSerializer(Duration.class, ToStringSerializer.instance)
.addSerializer(Instant.class, ToStringSerializer.instance));
return objectMapper;
}
/**
 * Parses the major Java version from the "java.version" system property, handling
 * both legacy ("1.8.0_x" -> 8) and modern ("17.0.1" -> 17) formats.
 * Returns -1 when the property cannot be parsed.
 */
private static int getJavaVersion() {
int version = -1;
try {
String completeJavaVersion = System.getProperty("java.version");
String[] versionElements = completeJavaVersion.split("\\.");
int versionFirstPart = Integer.parseInt(versionElements[0]);
// Pre-Java 9 versions are reported as "1.<major>.…".
if (versionFirstPart == 1) {
version = Integer.parseInt(versionElements[1]);
} else {
version = versionFirstPart;
}
return version;
} catch (Exception ex) {
logger.warn("Error while fetching java version", ex);
return version;
}
}
public static ByteBuf getUTF8BytesOrNull(String str) {
if (str == null) {
return null;
}
return Unpooled.wrappedBuffer(str.getBytes(StandardCharsets.UTF_8));
}
public static byte[] getUTF8Bytes(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
public static String encodeBase64String(byte[] binaryData) {
String encodedString = Base64Encoder.encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static String decodeBase64String(String encodedString) {
byte[] decodeString = Base64Decoder.decode(encodedString);
return new String(decodeString, StandardCharsets.UTF_8);
}
public static String decodeAsUTF8String(String inputString) {
if (inputString == null || inputString.isEmpty()) {
return inputString;
}
try {
return URLDecoder.decode(inputString, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
logger.warn("Error while decoding input string", e);
return inputString;
}
}
public static String encodeUrlBase64String(byte[] binaryData) {
String encodedString = Base64UrlEncoder.withoutPadding().encodeToString(binaryData);
if (encodedString.endsWith("\r\n")) {
encodedString = encodedString.substring(0, encodedString.length() - 2);
}
return encodedString;
}
public static void configureSimpleObjectMapper(boolean allowDuplicateProperties) {
if (allowDuplicateProperties) {
Utils.simpleObjectMapper = Utils.simpleObjectMapperAllowingDuplicatedProperties;
} else {
Utils.simpleObjectMapper = Utils.simpleObjectMapperDisallowingDuplicatedProperties;
}
}
/**
* Joins the specified paths by appropriately padding them with '/'
*
* @param path1 the first path segment to join.
* @param path2 the second path segment to join.
* @return the concatenated path with '/'
*/
public static String joinPath(String path1, String path2) {
path1 = trimBeginningAndEndingSlashes(path1);
String result = "/" + path1 + "/";
if (!StringUtils.isEmpty(path2)) {
path2 = trimBeginningAndEndingSlashes(path2);
result += path2 + "/";
}
return result;
}
/**
* Trims the beginning and ending '/' from the given path
*
* @param path the path to trim for beginning and ending slashes
* @return the path without beginning and ending '/'
*/
public static String trimBeginningAndEndingSlashes(String path) {
if(path == null) {
return null;
}
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.endsWith("/")) {
path = path.substring(0, path.length() - 1);
}
return path;
}
public static String createQuery(Map<String, String> queryParameters) {
if (queryParameters == null)
return "";
StringBuilder queryString = new StringBuilder();
for (Map.Entry<String, String> nameValuePair : queryParameters.entrySet()) {
String key = nameValuePair.getKey();
String value = nameValuePair.getValue();
if (key != null && !key.isEmpty()) {
if (queryString.length() > 0) {
queryString.append(RuntimeConstants.Separators.Query[1]);
}
queryString.append(key);
if (value != null) {
queryString.append(RuntimeConstants.Separators.Query[2]);
queryString.append(value);
}
}
}
return queryString.toString();
}
public static URI setQuery(String urlString, String query) {
if (urlString == null)
throw new IllegalStateException("urlString parameter can't be null.");
query = Utils.removeLeadingQuestionMark(query);
try {
if (query != null && !query.isEmpty()) {
return new URI(Utils.addTrailingSlash(urlString) + RuntimeConstants.Separators.Query[0] + query);
} else {
return new URI(Utils.addTrailingSlash(urlString));
}
} catch (URISyntaxException e) {
throw new IllegalStateException("Uri is invalid: ", e);
}
}
/**
* Given the full path to a resource, extract the collection path.
*
* @param resourceFullName the full path to the resource.
* @return the path of the collection in which the resource is.
*/
public static String getCollectionName(String resourceFullName) {
if (resourceFullName != null) {
resourceFullName = Utils.trimBeginningAndEndingSlashes(resourceFullName);
int slashCount = 0;
for (int i = 0; i < resourceFullName.length(); i++) {
if (resourceFullName.charAt(i) == '/') {
slashCount++;
if (slashCount == 4) {
return resourceFullName.substring(0, i);
}
}
}
}
return resourceFullName;
}
public static <T> int getCollectionSize(Collection<T> collection) {
if (collection == null) {
return 0;
}
return collection.size();
}
public static boolean isCollectionChild(ResourceType type) {
return type == ResourceType.Document || type == ResourceType.Attachment || type == ResourceType.Conflict
|| type == ResourceType.StoredProcedure || type == ResourceType.Trigger || type == ResourceType.UserDefinedFunction;
}
public static boolean isWriteOperation(OperationType operationType) {
return operationType == OperationType.Create || operationType == OperationType.Upsert || operationType == OperationType.Delete || operationType == OperationType.Replace
|| operationType == OperationType.ExecuteJavaScript || operationType == OperationType.Batch;
}
private static String addTrailingSlash(String path) {
if (path == null || path.isEmpty())
path = new String(RuntimeConstants.Separators.Url);
else if (path.charAt(path.length() - 1) != RuntimeConstants.Separators.Url[0])
path = path + RuntimeConstants.Separators.Url[0];
return path;
}
private static String removeLeadingQuestionMark(String path) {
if (path == null || path.isEmpty())
return path;
if (path.charAt(0) == RuntimeConstants.Separators.Query[0])
return path.substring(1);
return path;
}
public static boolean isValidConsistency(ConsistencyLevel backendConsistency,
ConsistencyLevel desiredConsistency) {
switch (backendConsistency) {
case STRONG:
return desiredConsistency == ConsistencyLevel.STRONG ||
desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case BOUNDED_STALENESS:
return desiredConsistency == ConsistencyLevel.BOUNDED_STALENESS ||
desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
case SESSION:
case EVENTUAL:
case CONSISTENT_PREFIX:
return desiredConsistency == ConsistencyLevel.SESSION ||
desiredConsistency == ConsistencyLevel.EVENTUAL ||
desiredConsistency == ConsistencyLevel.CONSISTENT_PREFIX;
default:
throw new IllegalArgumentException("backendConsistency");
}
}
public static String getUserAgent() {
return getUserAgent(HttpConstants.Versions.SDK_NAME, HttpConstants.Versions.getSdkVersion());
}
public static String getUserAgent(String sdkName, String sdkVersion) {
String osName = System.getProperty("os.name");
if (osName == null) {
osName = "Unknown";
}
osName = SPACE_PATTERN.matcher(osName).replaceAll("");
return String.format("%s%s/%s %s/%s JRE/%s",
UserAgentContainer.AZSDK_USERAGENT_PREFIX,
sdkName,
sdkVersion,
osName,
System.getProperty("os.version"),
System.getProperty("java.version")
);
}
public static ObjectMapper getSimpleObjectMapper() {
return Utils.simpleObjectMapper;
}
public static ObjectMapper getDurationEnabledObjectMapper() {
return durationEnabledObjectMapper;
}
/**
* Returns Current Time in RFC 1123 format, e.g,
* Fri, 01 Dec 2017 19:22:30 GMT.
*
* @return an instance of STRING
*/
public static String nowAsRFC1123() {
ZonedDateTime now = ZonedDateTime.now(GMT_ZONE_ID);
return Utils.RFC_1123_DATE_TIME.format(now);
}
public static UUID randomUUID() {
return TIME_BASED_GENERATOR.generate();
}
public static String instantAsUTCRFC1123(Instant instant){
return Utils.RFC_1123_DATE_TIME.format(instant.atZone(GMT_ZONE_ID));
}
public static int getValueOrDefault(Integer val, int defaultValue) {
return val != null ? val : defaultValue;
}
public static void checkStateOrThrow(boolean value, String argumentName, String message) throws IllegalArgumentException {
IllegalArgumentException t = checkStateOrReturnException(value, argumentName, message);
if (t != null) {
throw t;
}
}
public static void checkNotNullOrThrow(Object val, String argumentName, String message) throws NullPointerException {
NullPointerException t = checkNotNullOrReturnException(val, argumentName, message);
if (t != null) {
throw t;
}
}
/**
 * Throws {@link IllegalArgumentException} when {@code value} is false, with a message
 * produced by formatting {@code messageTemplate} with {@code messageTemplateParams}.
 */
public static void checkStateOrThrow(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) throws IllegalArgumentException {
    // BUG FIX: argumentName was previously passed where messageTemplate belongs,
    // so the template (and its parameters) never reached the exception message.
    IllegalArgumentException t = checkStateOrReturnException(value, argumentName, messageTemplate, messageTemplateParams);
    if (t != null) {
        throw t;
    }
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String message) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, message));
}
public static IllegalArgumentException checkStateOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new IllegalArgumentException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
private static NullPointerException checkNotNullOrReturnException(Object val, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (val != null) {
return null;
}
return new NullPointerException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
public static BadRequestException checkRequestOrReturnException(boolean value, String argumentName, String messageTemplate, Object... messageTemplateParams) {
if (value) {
return null;
}
return new BadRequestException(String.format("argumentName: %s, message: %s", argumentName, String.format(messageTemplate, messageTemplateParams)));
}
@SuppressWarnings("unchecked")
public static <O, I> O as(I i, Class<O> klass) {
if (i == null) {
return null;
}
if (klass.isInstance(i)) {
return (O) i;
} else {
return null;
}
}
@SuppressWarnings("unchecked")
public static <V> List<V> immutableListOf() {
return Collections.EMPTY_LIST;
}
public static <K, V> Map<K, V>immutableMapOf(K k1, V v1) {
Map<K, V> map = new HashMap<>();
map.put(k1, v1);
map = Collections.unmodifiableMap(map);
return map;
}
public static <V> V firstOrDefault(List<V> list) {
return list.size() > 0? list.get(0) : null ;
}
public static class ValueHolder<V> {
public ValueHolder() {
}
public ValueHolder(V v) {
this.v = v;
}
public V v;
public static <T> ValueHolder<T> initialize(T v) {
return new ValueHolder<>(v);
}
}
public static <K, V> boolean tryGetValue(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.get(key);
return holder.v != null;
}
public static <K, V> boolean tryRemove(Map<K, V> dictionary, K key, ValueHolder<V> holder) {
holder.v = dictionary.remove(key);
return holder.v != null;
}
public static <T> T parse(String itemResponseBodyAsString, Class<T> itemClassType) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(itemResponseBodyAsString, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse string [%s] to POJO.", itemResponseBodyAsString), e);
}
}
public static ObjectNode parseJson(String itemResponseBodyAsString) {
if (StringUtils.isEmpty(itemResponseBodyAsString)) {
return null;
}
try {
return (ObjectNode)getSimpleObjectMapper().readTree(itemResponseBodyAsString);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse json string [%s] to ObjectNode.", itemResponseBodyAsString), e);
}
}
public static <T> T parse(byte[] item, Class<T> itemClassType) {
if (Utils.isEmpty(item)) {
return null;
}
try {
return getSimpleObjectMapper().readValue(item, itemClassType);
} catch (IOException e) {
throw new IllegalStateException(
String.format("Failed to parse byte-array %s to POJO.", new String(item, StandardCharsets.UTF_8)), e);
}
}
public static <T> T parse(JsonNode jsonNode, Class<T> itemClassType, ItemDeserializer itemDeserializer) {
ItemDeserializer effectiveDeserializer = itemDeserializer == null ?
new ItemDeserializer.JsonDeserializer() : itemDeserializer;
return effectiveDeserializer.convert(itemClassType, jsonNode);
}
public static ByteBuffer serializeJsonToByteBuffer(ObjectMapper objectMapper, Object object) {
try {
ByteBufferOutputStream byteBufferOutputStream = new ByteBufferOutputStream(ONE_KB);
objectMapper.writeValue(byteBufferOutputStream, object);
return byteBufferOutputStream.asByteBuffer();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to serialize the object into json", e);
}
}
public static boolean isEmpty(byte[] bytes) {
return bytes == null || bytes.length == 0;
}
public static CosmosChangeFeedRequestOptions getEffectiveCosmosChangeFeedRequestOptions(
CosmosPagedFluxOptions pagedFluxOptions,
CosmosChangeFeedRequestOptions cosmosChangeFeedRequestRequestOptions) {
checkNotNull(
cosmosChangeFeedRequestRequestOptions,
"Argument 'cosmosChangeFeedRequestRequestOptions' must not be null");
return ModelBridgeInternal
.getEffectiveChangeFeedRequestOptions(
cosmosChangeFeedRequestRequestOptions, pagedFluxOptions);
}
static String escapeNonAscii(String partitionKeyJson) {
StringBuilder sb = null;
for (int i = 0; i < partitionKeyJson.length(); i++) {
int val = partitionKeyJson.charAt(i);
if (val > 127) {
if (sb == null) {
sb = new StringBuilder(partitionKeyJson.length());
sb.append(partitionKeyJson, 0, i);
}
sb.append("\\u").append(String.format("%04X", val));
} else {
if (sb != null) {
sb.append(partitionKeyJson.charAt(i));
}
}
}
if (sb == null) {
return partitionKeyJson;
} else {
return sb.toString();
}
}
public static byte[] toByteArray(ByteBuf buf) {
byte[] bytes = new byte[buf.readableBytes()];
buf.readBytes(bytes);
return bytes;
}
public static String toJson(ObjectMapper mapper, ObjectNode object) {
try {
return mapper.writeValueAsString(object);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to convert JSON to STRING", e);
}
}
public static long getMaxIntegratedCacheStalenessInMillis(DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) {
Duration maxIntegratedCacheStaleness = dedicatedGatewayRequestOptions.getMaxIntegratedCacheStaleness();
if (maxIntegratedCacheStaleness.toNanos() > 0 && maxIntegratedCacheStaleness.toMillis() <= 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness granularity is milliseconds");
}
if (maxIntegratedCacheStaleness.toMillis() < 0) {
throw new IllegalArgumentException("MaxIntegratedCacheStaleness duration cannot be negative");
}
return maxIntegratedCacheStaleness.toMillis();
}
} |
Could request callback be null here? | public Object invoke(Object proxy, SwaggerMethodParser methodParser, HttpRequest request) {
// Apply the caller-supplied per-request callback before sending.
// FIX(review): also guard the callback itself — if no request callback was
// configured, getRequestCallback() may yield null and accept() would NPE.
if (request.getRequestOptions() != null && request.getRequestOptions().getRequestCallback() != null) {
    request.getRequestOptions().getRequestCallback().accept(request);
}
if (request.getBody() != null) {
    // NOTE(review): presumably validates the body against its declared length —
    // confirm against RestProxyUtils.validateLength.
    request.setBody(RestProxyUtils.validateLength(request));
}
final Response<?> response = send(request);
// Map the raw Response onto the service method's declared return type.
return handleRestReturnType(response, methodParser, methodParser.getReturnType());
}
if (request.getRequestOptions() != null) {
request.getRequestOptions().getRequestCallback().accept(request);
}
if (request.getBody() != null) {
request.setBody(RestProxyUtils.validateLength(request));
}
final Response<?> response = send(request);
return handleRestReturnType(response, methodParser, methodParser.getReturnType());
} | class RestProxyImpl extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline The HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer The serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser The parser that contains information about the interface describing REST API methods
* to be used.
*/
public RestProxyImpl(HttpPipeline httpPipeline, ObjectSerializer serializer,
SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send.
*
* @return A {@link Response}.
*/
Response<?> send(HttpRequest request) {
return httpPipeline.send(request);
}
@SuppressWarnings({"try", "unused"})
@Override
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* <p>'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.</p>
*
* @param response The Response to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
*
* @return The decodedResponse.
*/
private Response<?> ensureExpectedStatus(Response<?> response, SwaggerMethodParser methodParser) {
int responseStatusCode = response.getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)) {
return response;
}
BinaryData responseData = response.getBody();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
throw instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
response, null, null);
} else {
throw instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), response,
responseBytes, decodeByteArray(response.getBody().toBytes(), response, serializer, methodParser));
}
}
private Object handleRestResponseReturnType(Response<?> response, SwaggerMethodParser methodParser,
Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
try {
response.close();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return createResponseIfNecessary(response, entityType, null);
} else {
ResponseBodyMode responseBodyMode = null;
RequestOptions requestOptions = response.getRequest().getRequestOptions();
if (requestOptions != null) {
responseBodyMode = requestOptions.getResponseBodyMode();
}
if (responseBodyMode == DESERIALIZE) {
HttpResponseAccessHelper.setValue((HttpResponse<?>) response,
handleResponseBody(response, methodParser, bodyType, response.getBody()));
} else {
HttpResponseAccessHelper.setBodyDeserializer((HttpResponse<?>) response, (body) ->
handleResponseBody(response, methodParser, bodyType, body));
}
Response<?> responseToReturn = createResponseIfNecessary(response, entityType, response.getBody());
if (responseToReturn == null) {
return createResponseIfNecessary(response, entityType, null);
}
return responseToReturn;
}
} else {
return handleResponseBody(response, methodParser, entityType, response.getBody());
}
}
private Object handleResponseBody(Response<?> response, SwaggerMethodParser methodParser, Type entityType,
BinaryData responseBody) {
final int responseStatusCode = response.getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE)
|| TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
result = (responseStatusCode / 100) == 2;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
byte[] responseBodyBytes = responseBody != null ? responseBody.toBytes() : null;
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes != null ? (responseBodyBytes.length == 0 ? null : responseBodyBytes) : null;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, InputStream.class)) {
result = responseBody.toStream();
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = responseBody;
} else {
result = decodeByteArray(responseBody.toBytes(), response, serializer, methodParser);
}
return result;
}
/**
* Handle the provided HTTP response and return the deserialized value.
*
* @param response The HTTP response to the original HTTP request.
* @param methodParser The SwaggerMethodParser that the request originates from.
* @param returnType The type of value that will be returned.
*
* @return The deserialized result.
*/
private Object handleRestReturnType(Response<?> response, SwaggerMethodParser methodParser, Type returnType) {
final Response<?> expectedResponse = ensureExpectedStatus(response, methodParser);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) {
try {
expectedResponse.close();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
result = null;
} else {
result = handleRestResponseReturnType(response, methodParser, returnType);
}
return result;
}
public void updateRequest(RequestDataConfiguration requestDataConfiguration, ObjectSerializer serializerAdapter) {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (bodyContentObject == null) {
return;
}
if (supportsJsonSerializable(bodyContentObject.getClass())) {
request.setBody(BinaryData.fromObject(bodyContentObject));
return;
}
if (isJson) {
request.setBody(BinaryData.fromObject(bodyContentObject, serializerAdapter));
} else if (bodyContentObject instanceof byte[]) {
request.setBody(BinaryData.fromBytes((byte[]) bodyContentObject));
} else if (bodyContentObject instanceof String) {
request.setBody(BinaryData.fromString((String) bodyContentObject));
} else if (bodyContentObject instanceof ByteBuffer) {
if (((ByteBuffer) bodyContentObject).hasArray()) {
request.setBody(BinaryData.fromBytes(((ByteBuffer) bodyContentObject).array()));
} else {
byte[] array = new byte[((ByteBuffer) bodyContentObject).remaining()];
((ByteBuffer) bodyContentObject).get(array);
request.setBody(BinaryData.fromBytes(array));
}
} else {
request.setBody(BinaryData.fromObject(bodyContentObject, serializerAdapter));
}
}
} | class RestProxyImpl extends RestProxyBase {
/**
* Create a RestProxy.
*
* @param httpPipeline The HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests.
* @param serializer The serializer that will be used to convert response bodies to POJOs.
* @param interfaceParser The parser that contains information about the interface describing REST API methods
* to be used.
*/
public RestProxyImpl(HttpPipeline httpPipeline, ObjectSerializer serializer,
SwaggerInterfaceParser interfaceParser) {
super(httpPipeline, serializer, interfaceParser);
}
/**
* Send the provided request, applying any request policies provided to the HttpClient instance.
*
* @param request the HTTP request to send.
*
* @return A {@link Response}.
*/
Response<?> send(HttpRequest request) {
return httpPipeline.send(request);
}
@SuppressWarnings({"try", "unused"})
@Override
/**
* Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status
* code' OR (2) emits provided response if it's status code ia allowed.
*
* <p>'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[]
* of additional allowed status codes.</p>
*
* @param response The Response to check.
* @param methodParser The method parser that contains information about the service interface method that initiated
* the HTTP request.
*
* @return The decodedResponse.
*/
private Response<?> ensureExpectedStatus(Response<?> response, SwaggerMethodParser methodParser) {
int responseStatusCode = response.getStatusCode();
if (methodParser.isExpectedResponseStatusCode(responseStatusCode)) {
return response;
}
BinaryData responseData = response.getBody();
byte[] responseBytes = responseData == null ? null : responseData.toBytes();
if (responseBytes == null || responseBytes.length == 0) {
throw instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode),
response, null, null);
} else {
throw instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), response,
responseBytes, decodeByteArray(response.getBody().toBytes(), response, serializer, methodParser));
}
}
private Object handleRestResponseReturnType(Response<?> response, SwaggerMethodParser methodParser,
Type entityType) {
if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) {
final Type bodyType = TypeUtil.getRestResponseBodyType(entityType);
if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) {
try {
response.close();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
return createResponseIfNecessary(response, entityType, null);
} else {
ResponseBodyMode responseBodyMode = null;
RequestOptions requestOptions = response.getRequest().getRequestOptions();
if (requestOptions != null) {
responseBodyMode = requestOptions.getResponseBodyMode();
}
if (responseBodyMode == DESERIALIZE) {
HttpResponseAccessHelper.setValue((HttpResponse<?>) response,
handleResponseBody(response, methodParser, bodyType, response.getBody()));
} else {
HttpResponseAccessHelper.setBodyDeserializer((HttpResponse<?>) response, (body) ->
handleResponseBody(response, methodParser, bodyType, body));
}
Response<?> responseToReturn = createResponseIfNecessary(response, entityType, response.getBody());
if (responseToReturn == null) {
return createResponseIfNecessary(response, entityType, null);
}
return responseToReturn;
}
} else {
return handleResponseBody(response, methodParser, entityType, response.getBody());
}
}
private Object handleResponseBody(Response<?> response, SwaggerMethodParser methodParser, Type entityType,
BinaryData responseBody) {
final int responseStatusCode = response.getStatusCode();
final HttpMethod httpMethod = methodParser.getHttpMethod();
final Type returnValueWireType = methodParser.getReturnValueWireType();
final Object result;
if (httpMethod == HttpMethod.HEAD
&& (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE)
|| TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) {
result = (responseStatusCode / 100) == 2;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) {
byte[] responseBodyBytes = responseBody != null ? responseBody.toBytes() : null;
if (returnValueWireType == Base64Url.class) {
responseBodyBytes = new Base64Url(responseBodyBytes).decodedBytes();
}
result = responseBodyBytes != null ? (responseBodyBytes.length == 0 ? null : responseBodyBytes) : null;
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, InputStream.class)) {
result = responseBody.toStream();
} else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) {
result = responseBody;
} else {
result = decodeByteArray(responseBody.toBytes(), response, serializer, methodParser);
}
return result;
}
/**
* Handle the provided HTTP response and return the deserialized value.
*
* @param response The HTTP response to the original HTTP request.
* @param methodParser The SwaggerMethodParser that the request originates from.
* @param returnType The type of value that will be returned.
*
* @return The deserialized result.
*/
private Object handleRestReturnType(Response<?> response, SwaggerMethodParser methodParser, Type returnType) {
final Response<?> expectedResponse = ensureExpectedStatus(response, methodParser);
final Object result;
if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) {
try {
expectedResponse.close();
} catch (IOException e) {
throw LOGGER.logThrowableAsError(new UncheckedIOException(e));
}
result = null;
} else {
result = handleRestResponseReturnType(response, methodParser, returnType);
}
return result;
}
public void updateRequest(RequestDataConfiguration requestDataConfiguration, ObjectSerializer serializerAdapter) {
boolean isJson = requestDataConfiguration.isJson();
HttpRequest request = requestDataConfiguration.getHttpRequest();
Object bodyContentObject = requestDataConfiguration.getBodyContent();
if (bodyContentObject == null) {
return;
}
if (supportsJsonSerializable(bodyContentObject.getClass())) {
request.setBody(BinaryData.fromObject(bodyContentObject));
return;
}
if (isJson) {
request.setBody(BinaryData.fromObject(bodyContentObject, serializerAdapter));
} else if (bodyContentObject instanceof byte[]) {
request.setBody(BinaryData.fromBytes((byte[]) bodyContentObject));
} else if (bodyContentObject instanceof String) {
request.setBody(BinaryData.fromString((String) bodyContentObject));
} else if (bodyContentObject instanceof ByteBuffer) {
if (((ByteBuffer) bodyContentObject).hasArray()) {
request.setBody(BinaryData.fromBytes(((ByteBuffer) bodyContentObject).array()));
} else {
byte[] array = new byte[((ByteBuffer) bodyContentObject).remaining()];
((ByteBuffer) bodyContentObject).get(array);
request.setBody(BinaryData.fromBytes(array));
}
} else {
request.setBody(BinaryData.fromObject(bodyContentObject, serializerAdapter));
}
}
} |
Don't we need to null check this in the constructor as well? | public HttpRequest setHttpMethod(HttpMethod httpMethod) {
this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
return this;
} | this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null"); | public HttpRequest setHttpMethod(HttpMethod httpMethod) {
this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
return this;
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
HttpRequestAccessHelper.setAccessor(new HttpRequestAccessHelper.HttpRequestAccessor() {
@Override
public int getRetryCount(HttpRequest httpRequest) {
return httpRequest.getRetryCount();
}
@Override
public HttpRequest setRetryCount(HttpRequest httpRequest, int retryCount) {
return httpRequest.setRetryCount(retryCount);
}
@Override
public ClientLogger getLogger(HttpRequest httpRequest) {
return httpRequest.getLogger();
}
@Override
public HttpRequest setLogger(HttpRequest httpRequest, ClientLogger logger) {
return httpRequest.setLogger(logger);
}
});
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions requestOptions;
private int retryCount;
private ClientLogger requestLogger;
private ResponseBodyMode responseBodyMode;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = httpMethod;
this.url = Objects.requireNonNull(url, "'url' cannot be null");
this.headers = new HttpHeaders();
this.requestOptions = RequestOptions.NONE;
}
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, String url) {
this.httpMethod = httpMethod;
setUrl(url);
this.headers = new HttpHeaders();
this.requestOptions = RequestOptions.NONE;
}
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Set the request {@link HttpMethod}.
*
* @param httpMethod The request {@link HttpMethod}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code httpMethod} is {@code null}.
*/
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
this.url = Objects.requireNonNull(url, "'url' cannot be null");
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
try {
this.url = new URL(Objects.requireNonNull(url, "'url' cannot be null"));
} catch (MalformedURLException ex) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
}
return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getRequestOptions() {
return requestOptions;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param requestOptions The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setRequestOptions(RequestOptions requestOptions) {
this.requestOptions = requestOptions;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
private int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
/**
* Gets the {@link ClientLogger} used to log the request and response.
*
* @return The {@link ClientLogger} used to log the request and response.
*/
private ClientLogger getLogger() {
return requestLogger;
}
/**
* Sets the {@link ClientLogger} used to log the request and response.
*
* @param requestLogger The {@link ClientLogger} used to log the request and response.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setLogger(ClientLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
HttpRequestAccessHelper.setAccessor(new HttpRequestAccessHelper.HttpRequestAccessor() {
@Override
public int getRetryCount(HttpRequest httpRequest) {
return httpRequest.getRetryCount();
}
@Override
public HttpRequest setRetryCount(HttpRequest httpRequest, int retryCount) {
return httpRequest.setRetryCount(retryCount);
}
});
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions requestOptions;
private int retryCount;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
this.url = Objects.requireNonNull(url, "'url' cannot be null");
this.headers = new HttpHeaders();
this.requestOptions = RequestOptions.NONE;
}
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, String url) {
this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
setUrl(url);
this.headers = new HttpHeaders();
this.requestOptions = RequestOptions.NONE;
}
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Set the request {@link HttpMethod}.
*
* @param httpMethod The request {@link HttpMethod}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code httpMethod} is {@code null}.
*/
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
this.url = Objects.requireNonNull(url, "'url' cannot be null");
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
try {
this.url = new URL(Objects.requireNonNull(url, "'url' cannot be null"));
} catch (MalformedURLException ex) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
}
return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getRequestOptions() {
return requestOptions;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param requestOptions The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setRequestOptions(RequestOptions requestOptions) {
this.requestOptions = requestOptions;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
private int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
} |
Need to update Javadocs to mention that if `this == RequestOptions.NONE` and this method is called that an IllegalStateException will be thrown. | public RequestOptions addHeader(HttpHeader header) {
if (locked) {
throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
}
this.requestCallback = this.requestCallback.andThen(request -> request.getHeaders().add(header));
return this;
} | if (locked) { | public RequestOptions addHeader(HttpHeader header) {
if (locked) {
throw LOGGER.logThrowableAsError(
new IllegalStateException("RequestOptions.NONE is immutable. Cannot add header."));
}
this.requestCallback = this.requestCallback.andThen(request -> request.getHeaders().add(header));
return this;
} | class RequestOptions {
public static final RequestOptions NONE = new RequestOptions()
.setContext(Context.EMPTY)
.lock();
private static final ClientLogger LOGGER = new ClientLogger(RequestOptions.class);
private Consumer<HttpRequest> requestCallback = request -> {
};
private Context context;
private ResponseBodyMode responseBodyMode;
private boolean locked;
/**
* Creates a new instance of {@link RequestOptions}.
*/
public RequestOptions() {
}
/**
* Gets the request callback, applying all the configurations set on this instance of {@link RequestOptions}.
*
* @return The request callback.
*/
public Consumer<HttpRequest> getRequestCallback() {
return this.requestCallback;
}
/**
* Gets the additional context on the request that is passed during the service call.
*
* @return The additional context that is passed during the service call.
*/
public Context getContext() {
return context;
}
/**
* Gets the configuration indicating how the body of the resulting HTTP response should be handled.
*
* <p>For more information about the options for handling an HTTP response body, see {@link ResponseBodyMode}.</p>
*
* @return The configuration indicating how the body of the resulting HTTP response should be handled.
*/
public ResponseBodyMode getResponseBodyMode() {
return responseBodyMode;
}
/**
* Adds a header to the {@link HttpRequest}.
*
* <p>If a header with the given name exists, the {@code value} is added to the existing header (comma-separated),
* otherwise a new header will be created.</p>
*
* @param header The header key.
*
* @return The updated {@link RequestOptions} object.
*/
/**
* Sets a header on the {@link HttpRequest}.
*
* <p>If a header with the given name exists it is overridden by the new {@code value}.</p>
*
* @param header The header key.
* @param value The header value.
*
* @return The updated {@link RequestOptions} object.
*/
public RequestOptions setHeader(HttpHeaderName header, String value) {
if (locked) {
throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
}
this.requestCallback = this.requestCallback.andThen(request -> request.getHeaders().set(header, value));
return this;
}
/**
* Adds a query parameter to the request URL. The parameter name and value will be URL encoded. To use an already
* encoded parameter name and value, call {@code addQueryParam("name", "value", true)}.
*
* @param parameterName The name of the query parameter.
* @param value The value of the query parameter.
*
* @return The updated {@link RequestOptions} object.
*/
public RequestOptions addQueryParam(String parameterName, String value) {
if (locked) {
throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
}
return addQueryParam(parameterName, value, false);
}
/**
* Adds a query parameter to the request URL, specifying whether the parameter is already encoded. A value
* {@code true} for this argument indicates that value of {@link QueryParam
* engine should not encode it. By default, the value will be encoded.
*
* @param parameterName The name of the query parameter.
* @param value The value of the query parameter.
* @param encoded Whether this query parameter is already encoded.
*
* @return The updated {@link RequestOptions} object.
*/
public RequestOptions addQueryParam(String parameterName, String value, boolean encoded) {
if (locked) {
throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
}
this.requestCallback = this.requestCallback.andThen(request -> {
String url = request.getUrl().toString();
String encodedParameterName = encoded ? parameterName : UrlEscapers.QUERY_ESCAPER.escape(parameterName);
String encodedParameterValue = encoded ? value : UrlEscapers.QUERY_ESCAPER.escape(value);
request.setUrl(url + (url.contains("?") ? "&" : "?") + encodedParameterName + "=" + encodedParameterValue);
});
return this;
}
/**
* Adds a custom request callback to modify the {@link HttpRequest} before it's sent by the {@link HttpClient}. The
* modifications made on a {@link RequestOptions} object are applied in order on the request.
*
* @param requestCallback The request callback.
*
* @return The updated {@link RequestOptions} object.
*
* @throws NullPointerException If {@code requestCallback} is {@code null}.
*/
public RequestOptions addRequestCallback(Consumer<HttpRequest> requestCallback) {
if (locked) {
throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
}
Objects.requireNonNull(requestCallback, "'requestCallback' cannot be null.");
this.requestCallback = this.requestCallback.andThen(requestCallback);
return this;
}
/**
* Sets the body to send as part of the {@link HttpRequest}.
*
* @param requestBody the request body data
*
* @return The updated {@link RequestOptions} object.
*
* @throws NullPointerException If {@code requestBody} is {@code null}.
*/
public RequestOptions setBody(BinaryData requestBody) {
if (locked) {
throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
}
Objects.requireNonNull(requestBody, "'requestBody' cannot be null.");
this.requestCallback = this.requestCallback.andThen(request -> request.setBody(requestBody));
return this;
}
    /**
     * Sets the additional context on the request that is passed during the service call.
     *
     * @param context Additional context that is passed during the service call.
     *
     * @return The updated {@link RequestOptions} object.
     *
     * @throws IllegalStateException If this instance has been locked against modification.
     */
    public RequestOptions setContext(Context context) {
        if (locked) {
            throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
        }
        this.context = context;
        return this;
    }
/**
* Sets the configuration indicating how the body of the resulting HTTP response should be handled. If {@code null},
* the response body will be handled based on the content type of the response.
*
* <p>For more information about the options for handling an HTTP response body, see {@link ResponseBodyMode}.</p>
*
* @param responseBodyMode The configuration indicating how the body of the resulting HTTP response should be
* handled.
*
* @return The updated {@link RequestOptions} object.
*/
public RequestOptions setResponseBodyMode(ResponseBodyMode responseBodyMode) {
if (locked) {
throw LOGGER.logThrowableAsError(new IllegalStateException("RequestOptions.NONE is immutable."));
}
this.responseBodyMode = responseBodyMode;
return this;
}
    /**
     * Locks this {@link RequestOptions} to prevent further modifications.
     *
     * <p>Every mutator checks the {@code locked} flag and throws once this has been called; this is
     * how the shared no-op instance is kept immutable.</p>
     *
     * @return This {@link RequestOptions} instance.
     */
    private RequestOptions lock() {
        locked = true;
        return this;
    }
} | class RequestOptions {
    /**
     * Signifies that no options need to be passed to the pipeline.
     */
    public static final RequestOptions NONE = new RequestOptions().lock();
    private static final ClientLogger LOGGER = new ClientLogger(RequestOptions.class);
    // Composite callback applied to the HttpRequest just before it is sent; starts as a no-op and
    // grows via andThen() as configuration methods are called.
    private Consumer<HttpRequest> requestCallback = request -> {
    };
    // Additional per-call context; defaults to Context.EMPTY (set in the constructor).
    private Context context;
    // How the resulting response body should be handled; null means "decide from content type".
    private ResponseBodyMode responseBodyMode;
    // Once true (see lock()), every mutator throws IllegalStateException.
    private boolean locked;
    // Optional logger used for this request/response; may be null.
    private ClientLogger logger;
    /**
     * Creates a new instance of {@link RequestOptions}.
     */
    public RequestOptions() {
        this.context = Context.EMPTY;
    }
    /**
     * Gets the request callback, applying all the configurations set on this instance of {@link RequestOptions}.
     *
     * @return The request callback.
     */
    public Consumer<HttpRequest> getRequestCallback() {
        return this.requestCallback;
    }
    /**
     * Gets the additional context on the request that is passed during the service call.
     *
     * @return The additional context that is passed during the service call.
     */
    public Context getContext() {
        return context;
    }
    /**
     * Gets the configuration indicating how the body of the resulting HTTP response should be handled.
     *
     * <p>For more information about the options for handling an HTTP response body, see {@link ResponseBodyMode}.</p>
     *
     * @return The configuration indicating how the body of the resulting HTTP response should be handled.
     */
    public ResponseBodyMode getResponseBodyMode() {
        return responseBodyMode;
    }
    /**
     * Gets the {@link ClientLogger} used to log the request and response.
     *
     * @return The {@link ClientLogger} used to log the request and response.
     */
    public ClientLogger getLogger() {
        return logger;
    }
/**
* Adds a header to the {@link HttpRequest}.
*
* <p>If a header with the given name exists, the {@code value} is added to the existing header (comma-separated),
* otherwise a new header will be created.</p>
*
* @param header The header key.
*
* @return The updated {@link RequestOptions} object.
*
* @throws IllegalStateException if this instance is {@link RequestOptions
*/
/**
* Sets a header on the {@link HttpRequest}.
*
* <p>If a header with the given name exists it is overridden by the new {@code value}.</p>
*
* @param header The header key.
* @param value The header value.
*
* @return The updated {@link RequestOptions} object.
*
* @throws IllegalStateException if this instance is {@link RequestOptions
*/
public RequestOptions setHeader(HttpHeaderName header, String value) {
if (locked) {
throw LOGGER.logThrowableAsError(
new IllegalStateException("RequestOptions.NONE is immutable. Cannot set header."));
}
this.requestCallback = this.requestCallback.andThen(request -> request.getHeaders().set(header, value));
return this;
}
    /**
     * Adds a query parameter to the request URL. The parameter name and value will be URL encoded. To use an already
     * encoded parameter name and value, call {@code addQueryParam("name", "value", true)}.
     *
     * @param parameterName The name of the query parameter.
     * @param value The value of the query parameter.
     *
     * @return The updated {@link RequestOptions} object.
     *
     * @throws IllegalStateException if this instance is {@link RequestOptions#NONE}, which is immutable.
     */
    public RequestOptions addQueryParam(String parameterName, String value) {
        // encoded=false: both the name and the value are URL encoded before being appended.
        return addQueryParam(parameterName, value, false);
    }
/**
* Adds a query parameter to the request URL, specifying whether the parameter is already encoded. A value
* {@code true} for this argument indicates that value of {@link QueryParam
* engine should not encode it. By default, the value will be encoded.
*
* @param parameterName The name of the query parameter.
* @param value The value of the query parameter.
* @param encoded Whether this query parameter is already encoded.
*
* @return The updated {@link RequestOptions} object.
*
* @throws IllegalStateException if this instance is {@link RequestOptions
*/
public RequestOptions addQueryParam(String parameterName, String value, boolean encoded) {
if (locked) {
throw LOGGER.logThrowableAsError(
new IllegalStateException("RequestOptions.NONE is immutable. Cannot add query param."));
}
this.requestCallback = this.requestCallback.andThen(request -> {
String url = request.getUrl().toString();
String encodedParameterName = encoded ? parameterName : UrlEscapers.QUERY_ESCAPER.escape(parameterName);
String encodedParameterValue = encoded ? value : UrlEscapers.QUERY_ESCAPER.escape(value);
request.setUrl(url + (url.contains("?") ? "&" : "?") + encodedParameterName + "=" + encodedParameterValue);
});
return this;
}
/**
* Adds a custom request callback to modify the {@link HttpRequest} before it's sent by the {@link HttpClient}. The
* modifications made on a {@link RequestOptions} object are applied in order on the request.
*
* @param requestCallback The request callback.
*
* @return The updated {@link RequestOptions} object.
*
* @throws NullPointerException If {@code requestCallback} is {@code null}.
* @throws IllegalStateException if this instance is {@link RequestOptions
*/
public RequestOptions addRequestCallback(Consumer<HttpRequest> requestCallback) {
if (locked) {
throw LOGGER.logThrowableAsError(
new IllegalStateException("RequestOptions.NONE is immutable. Cannot add request callback."));
}
Objects.requireNonNull(requestCallback, "'requestCallback' cannot be null.");
this.requestCallback = this.requestCallback.andThen(requestCallback);
return this;
}
/**
* Sets the body to send as part of the {@link HttpRequest}.
*
* @param requestBody the request body data
*
* @return The updated {@link RequestOptions} object.
*
* @throws NullPointerException If {@code requestBody} is {@code null}.
* @throws IllegalStateException if this instance is {@link RequestOptions
*/
public RequestOptions setBody(BinaryData requestBody) {
if (locked) {
throw LOGGER.logThrowableAsError(
new IllegalStateException("RequestOptions.NONE is immutable. Cannot set body."));
}
Objects.requireNonNull(requestBody, "'requestBody' cannot be null.");
this.requestCallback = this.requestCallback.andThen(request -> request.setBody(requestBody));
return this;
}
    /**
     * Sets the additional context on the request that is passed during the service call.
     *
     * @param context Additional context that is passed during the service call.
     *
     * @return The updated {@link RequestOptions} object.
     *
     * @throws IllegalStateException if this instance is {@link RequestOptions#NONE}, which is immutable.
     */
    public RequestOptions setContext(Context context) {
        if (locked) {
            throw LOGGER.logThrowableAsError(
                new IllegalStateException("RequestOptions.NONE is immutable. Cannot set context."));
        }
        this.context = context;
        return this;
    }
/**
* Sets the configuration indicating how the body of the resulting HTTP response should be handled. If {@code null},
* the response body will be handled based on the content type of the response.
*
* <p>For more information about the options for handling an HTTP response body, see {@link ResponseBodyMode}.</p>
*
* @param responseBodyMode The configuration indicating how the body of the resulting HTTP response should be
* handled.
*
* @return The updated {@link RequestOptions} object.
*
* @throws IllegalStateException if this instance is {@link RequestOptions
*/
public RequestOptions setResponseBodyMode(ResponseBodyMode responseBodyMode) {
if (locked) {
throw LOGGER.logThrowableAsError(
new IllegalStateException("RequestOptions.NONE is immutable. Cannot set response body mode."));
}
this.responseBodyMode = responseBodyMode;
return this;
}
/**
* Sets the {@link ClientLogger} used to log the request and response.
*
* @param logger The {@link ClientLogger} used to log the request and response.
*
* @return The updated {@link RequestOptions} object.
*
* @throws IllegalStateException if this instance is {@link RequestOptions
*/
public RequestOptions setLogger(ClientLogger logger) {
if (locked) {
throw LOGGER.logThrowableAsError(
new IllegalStateException("RequestOptions.NONE is immutable. Cannot set logger."));
}
this.logger = logger;
return this;
}
    /**
     * Locks this {@link RequestOptions} to prevent further modifications.
     *
     * <p>Every mutator checks the {@code locked} flag and throws once this has been called; this is
     * how {@link RequestOptions#NONE} is kept immutable.</p>
     *
     * @return This {@link RequestOptions} instance.
     */
    private RequestOptions lock() {
        locked = true;
        return this;
    }
} |
Right, good catch! | public HttpRequest setHttpMethod(HttpMethod httpMethod) {
this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
return this;
} | this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null"); | public HttpRequest setHttpMethod(HttpMethod httpMethod) {
this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
return this;
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
HttpRequestAccessHelper.setAccessor(new HttpRequestAccessHelper.HttpRequestAccessor() {
@Override
public int getRetryCount(HttpRequest httpRequest) {
return httpRequest.getRetryCount();
}
@Override
public HttpRequest setRetryCount(HttpRequest httpRequest, int retryCount) {
return httpRequest.setRetryCount(retryCount);
}
@Override
public ClientLogger getLogger(HttpRequest httpRequest) {
return httpRequest.getLogger();
}
@Override
public HttpRequest setLogger(HttpRequest httpRequest, ClientLogger logger) {
return httpRequest.setLogger(logger);
}
});
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions requestOptions;
private int retryCount;
private ClientLogger requestLogger;
private ResponseBodyMode responseBodyMode;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = httpMethod;
this.url = Objects.requireNonNull(url, "'url' cannot be null");
this.headers = new HttpHeaders();
this.requestOptions = RequestOptions.NONE;
}
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
public HttpRequest(HttpMethod httpMethod, String url) {
this.httpMethod = httpMethod;
setUrl(url);
this.headers = new HttpHeaders();
this.requestOptions = RequestOptions.NONE;
}
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Set the request {@link HttpMethod}.
*
* @param httpMethod The request {@link HttpMethod}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code httpMethod} is {@code null}.
*/
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
this.url = Objects.requireNonNull(url, "'url' cannot be null");
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
try {
this.url = new URL(Objects.requireNonNull(url, "'url' cannot be null"));
} catch (MalformedURLException ex) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
}
return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getRequestOptions() {
return requestOptions;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param requestOptions The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setRequestOptions(RequestOptions requestOptions) {
this.requestOptions = requestOptions;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
private int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
/**
* Gets the {@link ClientLogger} used to log the request and response.
*
* @return The {@link ClientLogger} used to log the request and response.
*/
private ClientLogger getLogger() {
return requestLogger;
}
/**
* Sets the {@link ClientLogger} used to log the request and response.
*
* @param requestLogger The {@link ClientLogger} used to log the request and response.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setLogger(ClientLogger requestLogger) {
this.requestLogger = requestLogger;
return this;
}
} | class HttpRequest {
private static final ClientLogger LOGGER = new ClientLogger(HttpRequest.class);
static {
HttpRequestAccessHelper.setAccessor(new HttpRequestAccessHelper.HttpRequestAccessor() {
@Override
public int getRetryCount(HttpRequest httpRequest) {
return httpRequest.getRetryCount();
}
@Override
public HttpRequest setRetryCount(HttpRequest httpRequest, int retryCount) {
return httpRequest.setRetryCount(retryCount);
}
});
}
private HttpMethod httpMethod;
private URL url;
private HttpHeaders headers;
private BinaryData body;
private ServerSentEventListener serverSentEventListener;
private RequestOptions requestOptions;
private int retryCount;
/**
* Create a new {@link HttpRequest} instance.
*
* @param httpMethod The request {@link HttpMethod}.
* @param url The target address to send the request to as a {@link URL}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest(HttpMethod httpMethod, URL url) {
this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
this.url = Objects.requireNonNull(url, "'url' cannot be null");
this.headers = new HttpHeaders();
this.requestOptions = RequestOptions.NONE;
}
    /**
     * Create a new {@link HttpRequest} instance.
     *
     * @param httpMethod The request {@link HttpMethod}.
     * @param url The target address to send the request to.
     *
     * @throws NullPointerException if {@code httpMethod} or {@code url} is {@code null}.
     * @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
     */
    public HttpRequest(HttpMethod httpMethod, String url) {
        this.httpMethod = Objects.requireNonNull(httpMethod, "'httpMethod' cannot be null");
        // NOTE(review): this calls the public, overridable setUrl(String) from a constructor; a
        // subclass override would run before the subclass is initialized. Consider routing both
        // through a private static parser — confirm no subclass relies on the current behavior.
        setUrl(url);
        this.headers = new HttpHeaders();
        this.requestOptions = RequestOptions.NONE;
    }
/**
* Get the request {@link HttpMethod}.
*
* @return The request {@link HttpMethod}.
*/
public HttpMethod getHttpMethod() {
return httpMethod;
}
/**
* Set the request {@link HttpMethod}.
*
* @param httpMethod The request {@link HttpMethod}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code httpMethod} is {@code null}.
*/
/**
* Get the target address as a {@link URL}.
*
* @return The target address as a {@link URL}.
*/
public URL getUrl() {
return url;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
*/
public HttpRequest setUrl(URL url) {
this.url = Objects.requireNonNull(url, "'url' cannot be null");
return this;
}
/**
* Set the target address to send the request to.
*
* @param url The target address as a {@link URL}.
*
* @return The updated {@link HttpRequest}.
*
* @throws NullPointerException if {@code url} is {@code null}.
* @throws IllegalArgumentException If {@code url} cannot be parsed into a valid {@link URL}.
*/
@SuppressWarnings("deprecation")
public HttpRequest setUrl(String url) {
try {
this.url = new URL(Objects.requireNonNull(url, "'url' cannot be null"));
} catch (MalformedURLException ex) {
throw LOGGER.logThrowableAsError(new IllegalArgumentException("'url' must be a valid URL.", ex));
}
return this;
}
/**
* Get the request {@link HttpHeaders headers}.
*
* @return The {@link HttpHeaders headers} to be sent.
*/
public HttpHeaders getHeaders() {
return headers;
}
/**
* Set the request {@link HttpHeaders headers}.
*
* @param headers The {@link HttpHeaders headers} to set.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setHeaders(HttpHeaders headers) {
this.headers = headers;
return this;
}
/**
* Get the request content.
*
* @return The content to be sent.
*/
public BinaryData getBody() {
return body;
}
/**
* Set the request content.
*
* <p>If the provided content has known length, i.e. {@link BinaryData
* {@code Content-Length} header is updated. Otherwise, if the provided content has unknown length, i.e.
* {@link BinaryData
* to indicate the length of the content, or use {@code Transfer-Encoding: chunked}.</p>
*
* @param content The request content.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setBody(BinaryData content) {
this.body = content;
if (content != null && content.getLength() != null) {
headers.set(HttpHeaderName.CONTENT_LENGTH, String.valueOf(content.getLength()));
}
return this;
}
/**
* Get the request {@link RequestOptions options}.
*
* @return The request {@link RequestOptions options}.
*/
public RequestOptions getRequestOptions() {
return requestOptions;
}
/**
* Set the request {@link RequestOptions options}.
*
* @param requestOptions The request {@link RequestOptions options}.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setRequestOptions(RequestOptions requestOptions) {
this.requestOptions = requestOptions;
return this;
}
/**
* Get the specified event stream {@link ServerSentEventListener listener} for this request.
*
* @return The {@link ServerSentEventListener listener} for this request.
*/
public ServerSentEventListener getServerSentEventListener() {
return serverSentEventListener;
}
/**
* Set an event stream {@link ServerSentEventListener listener} for this request.
*
* @param serverSentEventListener The {@link ServerSentEventListener listener} to set for this request.
*
* @return The updated {@link HttpRequest}.
*/
public HttpRequest setServerSentEventListener(ServerSentEventListener serverSentEventListener) {
this.serverSentEventListener = serverSentEventListener;
return this;
}
/**
* Gets the number of times the request has been retried.
*
* @return The number of times the request has been retried.
*/
private int getRetryCount() {
return retryCount;
}
/**
* Sets the number of times the request has been retried.
*
* @param retryCount The number of times the request has been retried.
*
* @return The updated {@link HttpRequest} object.
*/
private HttpRequest setRetryCount(int retryCount) {
this.retryCount = retryCount;
return this;
}
} |
Suggestion: "threshold" -> "marker"/"sentinel"/etc. (threshold makes me think of a scalar that can be exceeded or "crossed") | private Flux<StreamUpdate> mapEventStream() {
        // Accumulate bytes across buffers in outStream and emit a StreamUpdate each time a blank
        // line (two consecutive line feeds, with carriage returns ignored) completes an SSE event.
        return source
            .publishOn(Schedulers.boundedElastic())
            .concatMap(byteBuffer -> {
                // Events completed within this buffer; one buffer may finish zero or more events.
                List<StreamUpdate> values = new ArrayList<>();
                byte[] byteArray = byteBuffer.array();
                byte[] outByteArray = outStream.toByteArray();
                // Seed the counter from the carried-over bytes so an event terminator split across
                // two buffers ("\n" | "\n") is still recognized.
                int lineBreakCharsEncountered = outByteArray.length > 0 && isByteLineFeed(outByteArray[outByteArray.length - 1]) ? 1 : 0;
                for (byte currentByte : byteArray) {
                    outStream.write(currentByte);
                    if (isByteLineFeed(currentByte)) {
                        lineBreakCharsEncountered++;
                        if (lineBreakCharsEncountered == LINE_BREAK_CHAR_COUNT_THRESHOLD) {
                            String currentLine;
                            try {
                                currentLine = outStream.toString(StandardCharsets.UTF_8.name());
                                handleCurrentEvent(currentLine, values);
                            } catch (IOException e) {
                                return Flux.error(e);
                            }
                            // Event consumed; start accumulating the next one from scratch.
                            outStream = new ByteArrayOutputStream();
                        }
                    } else {
                        // Carriage returns do not reset the counter, so "\r\n\r\n" also counts as
                        // an event terminator.
                        if (!isByteCarriageReturn(currentByte)) {
                            lineBreakCharsEncountered = 0;
                        }
                    }
                }
                try {
                    // Flush a trailing event that ended exactly at the buffer boundary.
                    String remainingBytes = outStream.toString(StandardCharsets.UTF_8.name());
                    if (remainingBytes.endsWith("\n\n") || remainingBytes.endsWith("\r\n\r\n")) {
                        handleCurrentEvent(remainingBytes, values);
                    }
                } catch (IllegalArgumentException | UncheckedIOException e) {
                    // Best effort: a malformed tail is dropped but events parsed so far are still
                    // delivered. NOTE(review): confirm swallowing these is intended.
                    return Flux.fromIterable(values);
                } catch (IOException e) {
                    return Flux.error(e);
                }
                return Flux.fromIterable(values);
            }).cache();
} | if (lineBreakCharsEncountered == LINE_BREAK_CHAR_COUNT_THRESHOLD) { | private Flux<StreamUpdate> mapEventStream() {
return source
.publishOn(Schedulers.boundedElastic())
.concatMap(byteBuffer -> {
List<StreamUpdate> values = new ArrayList<>();
byte[] byteArray = byteBuffer.array();
byte[] outByteArray = outStream.toByteArray();
int lineBreakCharsEncountered = outByteArray.length > 0 && isByteLineFeed(outByteArray[outByteArray.length - 1]) ? 1 : 0;
for (byte currentByte : byteArray) {
outStream.write(currentByte);
if (isByteLineFeed(currentByte)) {
lineBreakCharsEncountered++;
if (lineBreakCharsEncountered == SSE_CHUNK_LINE_BREAK_COUNT_MARKER) {
String currentLine;
try {
currentLine = outStream.toString(StandardCharsets.UTF_8.name());
handleCurrentEvent(currentLine, values);
} catch (IOException e) {
return Flux.error(e);
}
outStream = new ByteArrayOutputStream();
}
} else {
if (!isByteCarriageReturn(currentByte)) {
lineBreakCharsEncountered = 0;
}
}
}
try {
String remainingBytes = outStream.toString(StandardCharsets.UTF_8.name());
if (remainingBytes.endsWith("\n\n") || remainingBytes.endsWith("\r\n\r\n")) {
handleCurrentEvent(remainingBytes, values);
}
} catch (IllegalArgumentException | UncheckedIOException e) {
return Flux.fromIterable(values);
} catch (IOException e) {
return Flux.error(e);
}
return Flux.fromIterable(values);
}).cache();
} | class OpenAIServerSentEvents {
    // Number of consecutive line feeds (carriage returns ignored) that terminates one SSE event.
    private static final int LINE_BREAK_CHAR_COUNT_THRESHOLD = 2;
    // Resolves each named event into the matching StreamUpdate subtype.
    private final StreamTypeFactory eventDeserializer = new StreamTypeFactory();
    // Raw byte stream of the server sent events response.
    private final Flux<ByteBuffer> source;
    // Accumulates bytes across buffers until a complete event has been received.
    private ByteArrayOutputStream outStream;
    /**
     * Creates a new instance wrapping the given byte stream.
     *
     * @param source The source of the server sent events.
     */
    public OpenAIServerSentEvents(Flux<ByteBuffer> source) {
        this.source = source;
        this.outStream = new ByteArrayOutputStream();
    }
    /**
     * Gets the stream of server sent events deserialized into {@code StreamUpdate}s.
     *
     * @return A stream of deserialized server sent events.
     */
    public Flux<StreamUpdate> getEvents() {
        return mapEventStream();
    }
/**
* Maps the byte buffer to a stream of server sent events.
*
* @return A stream of server sent events deserialized into StreamUpdates.
*/
/**
* Determines if character is a line feed (0xA).
*
* @param character The character to check.
* @return True if character is a line feed character, false otherwise.
*/
private boolean isByteLineFeed(byte character) {
return character == 0xA;
}
/**
* Determines if character is a carriage return (0xD).
*
* @param character The character to check.
* @return True if character is a carriage return character, false otherwise.
*/
private boolean isByteCarriageReturn(byte character) {
return character == 0xD;
}
/**
* Handles a collected event from the byte buffer which is formated as a UTF_8 string.
*
* @param currentEvent The current line of the server sent event.
* @param outputValues The list of values to add the current line to.
* @throws IllegalStateException If the current event contains a server side error.
*/
public void handleCurrentEvent(String currentEvent, List<StreamUpdate> outputValues) throws IllegalArgumentException {
if (currentEvent.isEmpty()) {
return;
}
String[] lines = currentEvent.split("\n", 2);
if (lines.length != 2) {
return;
}
if(lines[0].isEmpty() || lines[1].isEmpty()) {
return;
}
String eventName = lines[0].substring(6).trim();
String eventJson = lines[1].substring(5).trim();
if (DONE.equals(AssistantStreamEvent.fromString(eventName))) {
return;
}
if (ERROR.equals(AssistantStreamEvent.fromString(eventName))) {
throw new IllegalArgumentException("Server sent event type not supported");
}
outputValues.add(this.eventDeserializer.deserializeEvent(eventName, BinaryData.fromString(eventJson)));
}
} | class OpenAIServerSentEvents {
private static final int SSE_CHUNK_LINE_BREAK_COUNT_MARKER = 2;
/**
* A factory that determines into which type to deserialize the server sent events.
*/
private final StreamTypeFactory eventDeserializer = new StreamTypeFactory();
/**
* The source of the server sent events.
*/
private final Flux<ByteBuffer> source;
/**
* The output stream accumulating the server sent events.
*/
private ByteArrayOutputStream outStream;
/**
* Creates a new instance of OpenAIServerSentEvents.
*
* @param source The source of the server sent events.
*/
public OpenAIServerSentEvents(Flux<ByteBuffer> source) {
this.source = source;
this.outStream = new ByteArrayOutputStream();
}
/**
* Gets the stream of server sent events.
*
* @return A stream of server sent events.
*/
public Flux<StreamUpdate> getEvents() {
return mapEventStream();
}
/**
* Maps the byte buffer to a stream of server sent events.
*
* @return A stream of server sent events deserialized into StreamUpdates.
*/
/**
* Determines if character is a line feed (0xA).
*
* @param character The character to check.
* @return True if character is a line feed character, false otherwise.
*/
private boolean isByteLineFeed(byte character) {
return character == 0xA;
}
/**
* Determines if character is a carriage return (0xD).
*
* @param character The character to check.
* @return True if character is a carriage return character, false otherwise.
*/
private boolean isByteCarriageReturn(byte character) {
return character == 0xD;
}
    /**
     * Handles one complete server sent event, formatted as a UTF-8 string, and appends the
     * deserialized update to {@code outputValues}.
     *
     * <p>An event is expected to look like {@code "event: <name>\ndata: <json>"}. Empty or
     * structurally incomplete events are ignored, and the terminal {@code DONE} event is swallowed.</p>
     *
     * @param currentEvent The current server sent event.
     * @param outputValues The list of values to add the deserialized update to.
     * @throws IllegalArgumentException If the event is a server side error event; the message
     * carries the server-provided error payload.
     */
    public void handleCurrentEvent(String currentEvent, List<StreamUpdate> outputValues) throws IllegalArgumentException {
        if (currentEvent.isEmpty()) {
            return;
        }
        // Split into the "event:" line and the "data:" remainder.
        String[] lines = currentEvent.split("\n", 2);
        if (lines.length != 2) {
            return;
        }
        if (lines[0].isEmpty() || lines[1].isEmpty()) {
            return;
        }
        // Strip the "event:" (6 chars) and "data:" (5 chars) field prefixes.
        String eventName = lines[0].substring(6).trim();
        String eventJson = lines[1].substring(5).trim();
        if (DONE.equals(AssistantStreamEvent.fromString(eventName))) {
            return;
        }
        if (ERROR.equals(AssistantStreamEvent.fromString(eventName))) {
            // Surface the server-provided error payload to the caller.
            throw new IllegalArgumentException(eventJson);
        }
        outputValues.add(this.eventDeserializer.deserializeEvent(eventName, BinaryData.fromString(eventJson)));
    }
} |
Thank you, now that you mention it, threshold does ring like a "minimum value that needs to be met" which doesn't reflect the reality here that we need exactly `2`. | private Flux<StreamUpdate> mapEventStream() {
return source
.publishOn(Schedulers.boundedElastic())
.concatMap(byteBuffer -> {
List<StreamUpdate> values = new ArrayList<>();
byte[] byteArray = byteBuffer.array();
byte[] outByteArray = outStream.toByteArray();
int lineBreakCharsEncountered = outByteArray.length > 0 && isByteLineFeed(outByteArray[outByteArray.length - 1]) ? 1 : 0;
for (byte currentByte : byteArray) {
outStream.write(currentByte);
if (isByteLineFeed(currentByte)) {
lineBreakCharsEncountered++;
if (lineBreakCharsEncountered == LINE_BREAK_CHAR_COUNT_THRESHOLD) {
String currentLine;
try {
currentLine = outStream.toString(StandardCharsets.UTF_8.name());
handleCurrentEvent(currentLine, values);
} catch (IOException e) {
return Flux.error(e);
}
outStream = new ByteArrayOutputStream();
}
} else {
if (!isByteCarriageReturn(currentByte)) {
lineBreakCharsEncountered = 0;
}
}
}
try {
String remainingBytes = outStream.toString(StandardCharsets.UTF_8.name());
if (remainingBytes.endsWith("\n\n") || remainingBytes.endsWith("\r\n\r\n")) {
handleCurrentEvent(remainingBytes, values);
}
} catch (IllegalArgumentException | UncheckedIOException e) {
return Flux.fromIterable(values);
} catch (IOException e) {
return Flux.error(e);
}
return Flux.fromIterable(values);
}).cache();
} | if (lineBreakCharsEncountered == LINE_BREAK_CHAR_COUNT_THRESHOLD) { | private Flux<StreamUpdate> mapEventStream() {
return source
.publishOn(Schedulers.boundedElastic())
.concatMap(byteBuffer -> {
List<StreamUpdate> values = new ArrayList<>();
byte[] byteArray = byteBuffer.array();
byte[] outByteArray = outStream.toByteArray();
int lineBreakCharsEncountered = outByteArray.length > 0 && isByteLineFeed(outByteArray[outByteArray.length - 1]) ? 1 : 0;
for (byte currentByte : byteArray) {
outStream.write(currentByte);
if (isByteLineFeed(currentByte)) {
lineBreakCharsEncountered++;
if (lineBreakCharsEncountered == SSE_CHUNK_LINE_BREAK_COUNT_MARKER) {
String currentLine;
try {
currentLine = outStream.toString(StandardCharsets.UTF_8.name());
handleCurrentEvent(currentLine, values);
} catch (IOException e) {
return Flux.error(e);
}
outStream = new ByteArrayOutputStream();
}
} else {
if (!isByteCarriageReturn(currentByte)) {
lineBreakCharsEncountered = 0;
}
}
}
try {
String remainingBytes = outStream.toString(StandardCharsets.UTF_8.name());
if (remainingBytes.endsWith("\n\n") || remainingBytes.endsWith("\r\n\r\n")) {
handleCurrentEvent(remainingBytes, values);
}
} catch (IllegalArgumentException | UncheckedIOException e) {
return Flux.fromIterable(values);
} catch (IOException e) {
return Flux.error(e);
}
return Flux.fromIterable(values);
}).cache();
} | class OpenAIServerSentEvents {
private static final int LINE_BREAK_CHAR_COUNT_THRESHOLD = 2;
private final StreamTypeFactory eventDeserializer = new StreamTypeFactory();
private final Flux<ByteBuffer> source;
private ByteArrayOutputStream outStream;
/**
 * Creates a new instance of OpenAIServerSentEvents.
 *
 * @param source The upstream stream of raw byte buffers to be parsed into events.
 */
public OpenAIServerSentEvents(Flux<ByteBuffer> source) {
    // Start with an empty accumulator; bytes are appended as buffers arrive.
    this.outStream = new ByteArrayOutputStream();
    this.source = source;
}
/**
 * Gets the stream of parsed server sent events.
 *
 * @return A stream of server sent events deserialized into {@link StreamUpdate} values.
 */
public Flux<StreamUpdate> getEvents() {
return mapEventStream();
}
/**
* Maps the byte buffer to a stream of server sent events.
*
* @return A stream of server sent events deserialized into StreamUpdates.
*/
/**
* Determines if character is a line feed (0xA).
*
* @param character The character to check.
* @return True if character is a line feed character, false otherwise.
*/
private boolean isByteLineFeed(byte character) {
    // 0x0A is the ASCII line feed, i.e. '\n'.
    return character == '\n';
}
/**
* Determines if character is a carriage return (0xD).
*
* @param character The character to check.
* @return True if character is a carriage return character, false otherwise.
*/
private boolean isByteCarriageReturn(byte character) {
    // 0x0D is the ASCII carriage return, i.e. '\r'.
    return character == '\r';
}
/**
* Handles a collected event from the byte buffer, which is formatted as a UTF-8 string.
*
* @param currentEvent The current line of the server sent event.
* @param outputValues The list of values to add the current line to.
* @throws IllegalArgumentException If the current event contains a server-side error.
*/
/**
 * Parses one accumulated server sent event (a UTF-8 string) and, unless it is a
 * terminal or error event, deserializes it and appends it to {@code outputValues}.
 *
 * @param currentEvent The full text of the server sent event ("event:" line plus "data:" line).
 * @param outputValues The list the deserialized update is appended to.
 * @throws IllegalArgumentException If the event is the server-side ERROR event; the exception
 *         message carries the raw event JSON so callers can inspect the actual failure.
 */
public void handleCurrentEvent(String currentEvent, List<StreamUpdate> outputValues) throws IllegalArgumentException {
    if (currentEvent.isEmpty()) {
        return;
    }
    // An SSE chunk is expected to be exactly two lines: "event: <name>" and "data: <json>".
    String[] lines = currentEvent.split("\n", 2);
    if (lines.length != 2) {
        return;
    }
    if (lines[0].isEmpty() || lines[1].isEmpty()) {
        return;
    }
    // Strip the "event:" (6 chars) and "data:" (5 chars) prefixes.
    String eventName = lines[0].substring(6).trim();
    String eventJson = lines[1].substring(5).trim();
    if (DONE.equals(AssistantStreamEvent.fromString(eventName))) {
        // Terminal marker; carries no payload worth deserializing.
        return;
    }
    if (ERROR.equals(AssistantStreamEvent.fromString(eventName))) {
        // Surface the server's error payload verbatim instead of a generic message,
        // matching the corrected variant of this method elsewhere in this file.
        throw new IllegalArgumentException(eventJson);
    }
    outputValues.add(this.eventDeserializer.deserializeEvent(eventName, BinaryData.fromString(eventJson)));
}
} | class OpenAIServerSentEvents {
private static final int SSE_CHUNK_LINE_BREAK_COUNT_MARKER = 2;
/**
* A factory that determines into which type to deserialize the server sent events.
*/
private final StreamTypeFactory eventDeserializer = new StreamTypeFactory();
/**
* The source of the server sent events.
*/
private final Flux<ByteBuffer> source;
/**
* The output stream accumulating the server sent events.
*/
private ByteArrayOutputStream outStream;
/**
* Creates a new instance of OpenAIServerSentEvents.
*
* @param source The source of the server sent events.
*/
public OpenAIServerSentEvents(Flux<ByteBuffer> source) {
    // Fresh accumulator for incoming bytes; filled as buffers arrive from the source.
    this.outStream = new ByteArrayOutputStream();
    this.source = source;
}
/**
* Gets the stream of server sent events.
*
* @return A stream of server sent events.
*/
public Flux<StreamUpdate> getEvents() {
// Delegates to mapEventStream(), which buffers raw bytes and splits them into SSE chunks.
return mapEventStream();
}
/**
* Maps the byte buffer to a stream of server sent events.
*
* @return A stream of server sent events deserialized into StreamUpdates.
*/
/**
* Determines if character is a line feed (0xA).
*
* @param character The character to check.
* @return True if character is a line feed character, false otherwise.
*/
private boolean isByteLineFeed(byte character) {
    // 0x0A is the ASCII line feed, i.e. '\n'.
    return character == '\n';
}
/**
* Determines if character is a carriage return (0xD).
*
* @param character The character to check.
* @return True if character is a carriage return character, false otherwise.
*/
private boolean isByteCarriageReturn(byte character) {
    // 0x0D is the ASCII carriage return, i.e. '\r'.
    return character == '\r';
}
/**
* Handles a collected event from the byte buffer, which is formatted as a UTF-8 string.
*
* @param currentEvent The current line of the server sent event.
* @param outputValues The list of values to add the current line to.
* @throws IllegalArgumentException If the current event contains a server-side error.
*/
public void handleCurrentEvent(String currentEvent, List<StreamUpdate> outputValues) throws IllegalArgumentException {
// Nothing accumulated yet; ignore.
if (currentEvent.isEmpty()) {
return;
}
// An SSE chunk is expected to be "event: <name>" followed by "data: <json>".
String[] lines = currentEvent.split("\n", 2);
if (lines.length != 2) {
return;
}
if (lines[0].isEmpty() || lines[1].isEmpty()) {
return;
}
// Strip the "event:" (6 chars) and "data:" (5 chars) prefixes from each half.
String eventName = lines[0].substring(6).trim();
String eventJson = lines[1].substring(5).trim();
// The DONE sentinel terminates the stream; it carries no payload worth deserializing.
if (DONE.equals(AssistantStreamEvent.fromString(eventName))) {
return;
}
// Propagate the server-side error payload verbatim to the caller.
if (ERROR.equals(AssistantStreamEvent.fromString(eventName))) {
throw new IllegalArgumentException(eventJson);
}
outputValues.add(this.eventDeserializer.deserializeEvent(eventName, BinaryData.fromString(eventJson)));
}
} |
Would the .block() calls be removed later on? | public void setPropertiesEnableSnapshotVirtualDirectoryAccess(Boolean enableSnapshotVirtualDirectoryAccess) {
ShareCreateOptions options = new ShareCreateOptions();
ShareProtocols protocols = ModelHelper.parseShareProtocols(Constants.HeaderConstants.NFS_PROTOCOL);
options.setProtocols(protocols);
primaryShareAsyncClient.createWithResponse(options).block();
ShareSetPropertiesOptions setPropertiesOptions = new ShareSetPropertiesOptions();
setPropertiesOptions.setSnapshotVirtualDirectoryAccessEnabled(enableSnapshotVirtualDirectoryAccess);
setPropertiesOptions.setAccessTier(ShareAccessTier.TRANSACTION_OPTIMIZED);
primaryShareAsyncClient.setProperties(setPropertiesOptions).block();
StepVerifier.create(primaryShareAsyncClient.getProperties())
.assertNext(r -> {
assertEquals(protocols.toString(), r.getProtocols().toString());
if (enableSnapshotVirtualDirectoryAccess == null || enableSnapshotVirtualDirectoryAccess) {
assertTrue(r.isEnableSnapshotVirtualDirectoryAccess());
} else {
assertFalse(r.isEnableSnapshotVirtualDirectoryAccess());
}
})
.verifyComplete();
} | primaryShareAsyncClient.createWithResponse(options).block(); | public void setPropertiesEnableSnapshotVirtualDirectoryAccess(Boolean enableSnapshotVirtualDirectoryAccess) {
ShareCreateOptions options = new ShareCreateOptions();
ShareProtocols protocols = ModelHelper.parseShareProtocols(Constants.HeaderConstants.NFS_PROTOCOL);
options.setProtocols(protocols);
primaryShareAsyncClient.createWithResponse(options).block();
ShareSetPropertiesOptions setPropertiesOptions = new ShareSetPropertiesOptions();
setPropertiesOptions.setSnapshotVirtualDirectoryAccessEnabled(enableSnapshotVirtualDirectoryAccess);
setPropertiesOptions.setAccessTier(ShareAccessTier.TRANSACTION_OPTIMIZED);
primaryShareAsyncClient.setProperties(setPropertiesOptions).block();
StepVerifier.create(primaryShareAsyncClient.getProperties())
.assertNext(r -> {
assertEquals(protocols.toString(), r.getProtocols().toString());
if (enableSnapshotVirtualDirectoryAccess == null || enableSnapshotVirtualDirectoryAccess) {
assertTrue(r.isEnableSnapshotVirtualDirectoryAccess());
} else {
assertFalse(r.isEnableSnapshotVirtualDirectoryAccess());
}
})
.verifyComplete();
} | class ShareAsyncApiTests extends FileShareTestBase {
private ShareAsyncClient primaryShareAsyncClient;
private String shareName;
private static Map<String, String> testMetadata;
private FileSmbProperties smbProperties;
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
// Each test gets a fresh share name so concurrent/recorded runs do not collide.
shareName = generateShareName();
primaryFileServiceAsyncClient = fileServiceBuilderHelper().buildAsyncClient();
// NOTE: the share itself is NOT created here; each test calls create() explicitly when needed.
primaryShareAsyncClient = primaryFileServiceAsyncClient.getShareAsyncClient(shareName);
testMetadata = Collections.singletonMap("testmetadata", "value");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes>of(NtfsFileAttributes.NORMAL));
}
@Test
public void getShareURL() {
// Derive the expected account name from the test account's connection string.
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
// NOTE(review): this string literal appears truncated in this copy of the source
// (the format/URL text after "https:" is missing) — confirm against the original file.
String expectURL = String.format("https:
String shareURL = primaryShareAsyncClient.getShareUrl();
assertEquals(expectURL, shareURL);
}
@Test
public void getRootDirectoryClient() {
    // The root-directory accessor must hand back a directory client without any service call.
    ShareDirectoryAsyncClient rootDirectory = primaryShareAsyncClient.getRootDirectoryClient();
    assertInstanceOf(ShareDirectoryAsyncClient.class, rootDirectory);
}
@Test
public void getFileClientDoesNotCreateAFile() {
    // Obtaining a file client is a purely local operation; no file is created on the service.
    ShareFileAsyncClient clientForMissingFile = primaryShareAsyncClient.getFileClient("testFile");
    assertInstanceOf(ShareFileAsyncClient.class, clientForMissingFile);
}
@Test
public void createShare() {
    // A bare create with neither metadata nor quota should succeed with 201 (Created).
    StepVerifier.create(primaryShareAsyncClient.createWithResponse(null, (Integer) null))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 201))
        .verifyComplete();
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-08-04")
@Test
public void createShareSasError() {
ShareServiceAsyncClient unauthorizedServiceClient = fileServiceBuilderHelper()
.sasToken("sig=dummyToken")
.buildAsyncClient();
ShareAsyncClient share = unauthorizedServiceClient.getShareAsyncClient(generateShareName());
StepVerifier.create(share.create())
.verifyErrorSatisfies(r -> {
ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
assertTrue(e.getServiceMessage().contains("AuthenticationErrorDetail"));
});
}
@ParameterizedTest
@MethodSource("createShareWithArgsSupplier")
public void createShareWithArgs(Map<String, String> metadata, Integer quota) {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(metadata, quota))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
private static Stream<Arguments> createShareWithArgsSupplier() {
return Stream.of(Arguments.of(null, null),
Arguments.of(null, 1),
Arguments.of(testMetadata, null),
Arguments.of(testMetadata, 1));
}
@ParameterizedTest
@MethodSource("createShareWithInvalidArgsSupplier")
public void createShareWithInvalidArgs(Map<String, String> metadata, Integer quota, int statusCode,
ShareErrorCode errMessage) {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(metadata, quota)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode, errMessage));
}
private static Stream<Arguments> createShareWithInvalidArgsSupplier() {
return Stream.of(
Arguments.of(Collections.singletonMap("", "value"), 1, 400, ShareErrorCode.EMPTY_METADATA_KEY),
Arguments.of(Collections.singletonMap("metadata!", "value"), 1, 400, ShareErrorCode.INVALID_METADATA),
Arguments.of(testMetadata, 6000, 400, ShareErrorCode.INVALID_HEADER_VALUE));
}
@Test
public void createSnapshot() {
primaryShareAsyncClient.create().block();
String shareSnapshotName = generatePathName();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(null)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
ShareClient shareSnapshotClient = new ShareClientBuilder()
.shareName(shareSnapshotName)
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.snapshot(it.getValue().getSnapshot())
.buildClient();
assertEquals(it.getValue().getSnapshot(), shareSnapshotClient.getSnapshotId());
}).verifyComplete();
}
@Test
public void createSnapshotError() {
StepVerifier.create(primaryShareAsyncClient.createSnapshot()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void createSnapshotMetadata() {
primaryShareAsyncClient.create().block();
String shareSnapshotName = generatePathName();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(testMetadata))
.assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
ShareClient shareSnapshotClient = new ShareClientBuilder().shareName(shareSnapshotName)
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.snapshot(it.getValue().getSnapshot()).buildClient();
assertEquals(it.getValue().getSnapshot(), shareSnapshotClient.getSnapshotId());
}).verifyComplete();
}
@Test
public void createSnapshotMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(Collections.singletonMap("", "value")))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createIfNotExistsShare() {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(null, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsShareThatAlreadyExists() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
Response<ShareInfo> initialResponse = client.createIfNotExistsWithResponse(new ShareCreateOptions()).block();
Response<ShareInfo> secondResponse = client.createIfNotExistsWithResponse(new ShareCreateOptions()).block();
assertNotNull(initialResponse);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@ParameterizedTest
@MethodSource("createIfNotExistsShareWithArgsSupplier")
public void createIfNotExistsShareWithArgs(Map<String, String> metadata, Integer quota) {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(
new ShareCreateOptions().setMetadata(metadata).setQuotaInGb(quota)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
private static Stream<Arguments> createIfNotExistsShareWithArgsSupplier() {
return Stream.of(
Arguments.of(null, null),
Arguments.of(null, 1),
Arguments.of(testMetadata, null),
Arguments.of(testMetadata, 1));
}
@ParameterizedTest
@MethodSource("createShareWithInvalidArgsSupplier")
public void createIfNotExistsShareWithInvalidArgs(Map<String, String> metadata, Integer quota, int statusCode,
ShareErrorCode errMessage) {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(
new ShareCreateOptions().setMetadata(metadata).setQuotaInGb(quota)))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode,
errMessage));
}
@Test
public void deleteShare() {
    primaryShareAsyncClient.create().block();
    // Fix 1: assertNext(...) alone never subscribes to the publisher, so the original
    // assertion was never executed; a terminal verifyComplete() is required.
    // Fix 2: deleting a share is accepted asynchronously by the service — the expected
    // status is 202 (Accepted), not 201, consistent with deleteDirectory/deleteFile below.
    StepVerifier.create(primaryShareAsyncClient.deleteWithResponse())
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
        .verifyComplete();
}
@Test
public void deleteShareError() {
    // Deleting a share that was never created must fail with 404 / ShareNotFound.
    StepVerifier.create(primaryShareAsyncClient.delete())
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void deleteIfExistsShare() {
    primaryShareAsyncClient.create().block();
    // Fix 1: without a terminal verifyComplete() the StepVerifier never subscribes,
    // so the original status assertion never ran.
    // Fix 2: share deletion returns 202 (Accepted), not 201 — see the 202 expectations
    // in deleteIfExistsDirectoryThatWasAlreadyDeleted and deleteFile in this class.
    StepVerifier.create(primaryShareAsyncClient.deleteIfExistsWithResponse(null))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
        .verifyComplete();
}
@Test
public void deleteIfExistsShareThatDoesNotExist() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
assertNotEquals(Boolean.TRUE, client.exists().block());
}
@Test
public void deleteIfExistsDirectoryThatWasAlreadyDeleted() {
primaryShareAsyncClient.create().block();
Response<Boolean> initialResponse = primaryShareAsyncClient.deleteIfExistsWithResponse(null, null).block();
sleepIfRunningAgainstService(45000);
Response<Boolean> secondResponse = primaryShareAsyncClient.deleteIfExistsWithResponse(null, null).block();
assertNotNull(initialResponse);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 202);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 404);
assertTrue(initialResponse.getValue());
assertFalse(secondResponse.getValue());
}
@Test
public void getProperties() {
primaryShareAsyncClient.createWithResponse(testMetadata, 1).block();
StepVerifier.create(primaryShareAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertEquals(testMetadata, it.getValue().getMetadata());
assertEquals(it.getValue().getQuota(), 1);
}).verifyComplete();
}
@Test
public void getPropertiesError() {
StepVerifier.create(primaryShareAsyncClient.getProperties()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@PlaybackOnly
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void getPropertiesPremium(String protocol, ShareRootSquash rootSquash) {
ShareProtocols enabledProtocol = ModelHelper.parseShareProtocols(protocol);
ShareAsyncClient premiumShare = Objects.requireNonNull(
premiumFileServiceAsyncClient.createShareWithResponse(generateShareName(), new ShareCreateOptions()
.setMetadata(testMetadata).setProtocols(enabledProtocol).setRootSquash(rootSquash), null)
.block()).getValue();
StepVerifier.create(premiumShare.getPropertiesWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertNotNull(it.getValue());
assertEquals(testMetadata, it.getValue().getMetadata());
assertNotNull(it.getValue().getProvisionedIops());
assertNotNull(it.getValue().getProvisionedBandwidthMiBps());
assertNotNull(it.getValue().getNextAllowedQuotaDowngradeTime());
assertEquals(enabledProtocol.toString(), it.getValue().getProtocols().toString());
assertEquals(rootSquash, it.getValue().getRootSquash());
}).verifyComplete();
}
@PlaybackOnly
@Test
public void setPremiumProperties() {
List<ShareRootSquash> rootSquashes = Arrays.asList(
ShareRootSquash.ALL_SQUASH,
ShareRootSquash.NO_ROOT_SQUASH,
ShareRootSquash.ROOT_SQUASH);
for (ShareRootSquash rootSquash : rootSquashes) {
ShareAsyncClient premiumShareClient = Objects.requireNonNull(
premiumFileServiceAsyncClient.createShareWithResponse(generateShareName(),
new ShareCreateOptions().setProtocols(new ShareProtocols().setNfsEnabled(true)), null)
.block()).getValue();
premiumShareClient.setProperties(new ShareSetPropertiesOptions().setRootSquash(rootSquash)).block();
StepVerifier.create(premiumShareClient.getProperties()).assertNext(it ->
assertEquals(rootSquash, it.getRootSquash())).verifyComplete();
}
}
@Test
public void setQuota() {
    // Create the share with an initial 1 GB quota.
    primaryShareAsyncClient.createWithResponse(null, 1).block();
    // Confirm the starting quota.
    StepVerifier.create(primaryShareAsyncClient.getProperties())
        .assertNext(props -> assertEquals(props.getQuota(), 1))
        .verifyComplete();
    // Raise the quota to 2 GB and expect a 200 response.
    StepVerifier.create(primaryShareAsyncClient.setQuotaWithResponse(2))
        .assertNext(response -> FileShareTestHelper.assertResponseStatusCode(response, 200))
        .verifyComplete();
    // Confirm the new quota took effect.
    StepVerifier.create(primaryShareAsyncClient.getProperties())
        .assertNext(props -> assertEquals(props.getQuota(), 2))
        .verifyComplete();
}
@Test
public void setQuotaError() {
StepVerifier.create(primaryShareAsyncClient.setQuota(2)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void setMetadata() {
primaryShareAsyncClient.createWithResponse(testMetadata, null).block();
Map<String, String> metadataAfterSet = Collections.singletonMap("afterset", "value");
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(testMetadata, it.getMetadata())).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.setMetadataWithResponse(metadataAfterSet))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(metadataAfterSet, it.getMetadata())).verifyComplete();
}
@Test
public void setMetadataError() {
StepVerifier.create(primaryShareAsyncClient.setMetadata(testMetadata)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void getStatistics(long size, int gigabytes) {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile("tempFile", size).block();
StepVerifier.create(primaryShareAsyncClient.getStatisticsWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertEquals(it.getValue().getShareUsageInBytes(), size);
assertEquals(it.getValue().getShareUsageInGB(), gigabytes);
}).verifyComplete();
}
@Test
public void getStatisticsError() {
StepVerifier.create(primaryShareAsyncClient.getStatistics()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void createDirectory() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null, null,
null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createDirectoryInvalidName() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectory("test/directory")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.PARENT_NOT_FOUND));
}
@Test
public void createDirectoryMetadata() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null, null,
testMetadata)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createDirectoryFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null,
FILE_PERMISSION, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createDirectoryFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", smbProperties,
null, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createDirectoryMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testdirectory", null, null,
Collections.singletonMap("", "value"))).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createIfNotExistsDirectory() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions())).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createIfNotExistsDirectoryThatAlreadyExists() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
client.create().block();
Response<ShareDirectoryAsyncClient> initialResponse = client.createDirectoryIfNotExistsWithResponse(
"testCreateDirectory", new ShareDirectoryCreateOptions()).block();
Response<ShareDirectoryAsyncClient> secondResponse = client.createDirectoryIfNotExistsWithResponse(
"testCreateDirectory", new ShareDirectoryCreateOptions()).block();
assertNotNull(initialResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@Test
public void createIfNotExistsDirectoryInvalidName() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExists("test/directory"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.PARENT_NOT_FOUND));
}
@Test
public void createIfNotExistsDirectoryMetadata() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setMetadata(testMetadata)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setSmbProperties(smbProperties)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testdirectory",
new ShareDirectoryCreateOptions().setMetadata(Collections.singletonMap("", "value"))))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createFile() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createFileFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null,
FILE_PERMISSION, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createFileFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, smbProperties,
null, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileInvalidArgs(String fileName, long maxSize, int statusCode, ShareErrorCode errMsg) {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse(fileName, maxSize, null, null, null, null))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode,
errMsg));
}
@Test
public void createFileLease() {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.getFileClient("testCreateFile").create(512).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient("testCreateFile")).acquireLease()
.block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null, new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void createFileLeaseFail() {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.getFileClient("testCreateFile").create(512).block();
createLeaseClient(primaryShareAsyncClient.getFileClient("testCreateFile")).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
@Test
public void createFileMaxOverload() {
primaryShareAsyncClient.create().block();
ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("txt");
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, httpHeaders,
smbProperties, FILE_PERMISSION, testMetadata)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileMaxOverloadInvalidArgs(String fileName, long maxSize, ShareFileHttpHeaders httpHeaders,
Map<String, String> metadata, ShareErrorCode errMsg) {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse(fileName, maxSize, httpHeaders, null, null,
metadata)).verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
errMsg));
}
@Test
public void deleteDirectory() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createDirectory(directoryName).block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectoryWithResponse(directoryName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteDirectoryError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectory("testdirectory"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void deleteIfExistsDirectory() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createDirectory(directoryName).block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectoryIfExistsWithResponse(directoryName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteIfExistsDirectoryThatDoesNotExist() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
Response<Boolean> response = primaryShareAsyncClient.deleteDirectoryIfExistsWithResponse(directoryName).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
}
@Test
public void deleteFile() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteFileLease() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void deleteFileLeaseFail() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
@Test
public void deleteFileError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.deleteFile("testdirectory")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void deleteIfExistsFile() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteIfExistsFileLease() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
Response<Boolean> response = primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName, null).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
}
@Test
public void deleteIfExistsFileLeaseFail() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
@Test
public void createPermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createPermissionWithResponse(FILE_PERMISSION))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createAndGetPermission() {
primaryShareAsyncClient.create().block();
String filePermissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
StepVerifier.create(primaryShareAsyncClient.getPermissionWithResponse(filePermissionKey))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
}
@Test
public void createPermissionError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createPermissionWithResponse("abcde")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.fromString("FileInvalidPermission")));
}
@Test
public void getPermissionError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.getPermissionWithResponse("abcde"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.INVALID_HEADER_VALUE));
}
@Test
public void getSnapshotId() {
    // A snapshot id handed to the builder must be echoed back verbatim by the constructed client.
    String expectedSnapshotId = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
    ShareAsyncClient snapshotClient = shareBuilderHelper(shareName)
        .snapshot(expectedSnapshotId)
        .buildAsyncClient();
    assertEquals(expectedSnapshotId, snapshotClient.getSnapshotId());
}
@Test
public void getShareName() {
    // The client must report exactly the share name it was constructed with.
    String reportedName = primaryShareAsyncClient.getShareName();
    assertEquals(shareName, reportedName);
}
@Test
public void defaultAudience() {
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder().shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP))
.audience(null)
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
@Test
public void storageAccountAudience() {
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience(primaryShareAsyncClient.getAccountName()))
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
@Test
public void audienceError() {
// An audience that does not match the storage account must cause the service to reject the
// request with AUTHENTICATION_FAILED, regardless of the request payload.
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience("badaudience"))
.buildAsyncClient();
// Valid SDDL permission string; the call should fail on auth before the payload matters.
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.verifyErrorSatisfies(r -> {
ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
});
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience)
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-05-04")
@ParameterizedTest
@MethodSource("createEnableSnapshotVirtualDirectoryAccessSupplier")
public void createEnableSnapshotVirtualDirectoryAccess(Boolean enableSnapshotVirtualDirectoryAccess) {
// NFS-only share option: create the share with the flag set (or unset when the parameter is
// null) and confirm getProperties reflects it.
ShareCreateOptions options = new ShareCreateOptions();
ShareProtocols protocols = ModelHelper.parseShareProtocols(Constants.HeaderConstants.NFS_PROTOCOL);
options.setProtocols(protocols);
options.setSnapshotVirtualDirectoryAccessEnabled(enableSnapshotVirtualDirectoryAccess);
primaryShareAsyncClient.createWithResponse(options).block();
StepVerifier.create(primaryShareAsyncClient.getProperties())
.assertNext(r -> {
assertEquals(protocols.toString(), r.getProtocols().toString());
// An unset (null) flag is expected to behave the same as true, i.e. enabled by default.
if (enableSnapshotVirtualDirectoryAccess == null || enableSnapshotVirtualDirectoryAccess) {
assertTrue(r.isEnableSnapshotVirtualDirectoryAccess());
} else {
assertFalse(r.isEnableSnapshotVirtualDirectoryAccess());
}
})
.verifyComplete();
}
private static Stream<Arguments> createEnableSnapshotVirtualDirectoryAccessSupplier() {
    // Exercise explicit true, explicit false, and the unset (null) default.
    return Stream.of(Boolean.TRUE, Boolean.FALSE, null).map(Arguments::of);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-08-04")
@ParameterizedTest
@MethodSource("createEnableSnapshotVirtualDirectoryAccessSupplier")
} | class ShareAsyncApiTests extends FileShareTestBase {
private ShareAsyncClient primaryShareAsyncClient;
private String shareName;
private static Map<String, String> testMetadata;
private FileSmbProperties smbProperties;
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
// Fresh share name and clients per test; the share itself is created by each test as needed.
shareName = generateShareName();
primaryFileServiceAsyncClient = fileServiceBuilderHelper().buildAsyncClient();
primaryShareAsyncClient = primaryFileServiceAsyncClient.getShareAsyncClient(shareName);
// NOTE(review): testMetadata is a static field (re)assigned from instance setup; @MethodSource
// suppliers that read it may be resolved before the first setup() runs — confirm ordering.
testMetadata = Collections.singletonMap("testmetadata", "value");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes>of(NtfsFileAttributes.NORMAL));
}
@Test
public void getShareURL() {
// Derive the account name from the configured connection string to build the expected URL.
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
// NOTE(review): the format string below is truncated in this copy (it ends at "https:");
// presumably it was "https://%s.file.core.windows.net/%s" — restore before compiling.
String expectURL = String.format("https:
String shareURL = primaryShareAsyncClient.getShareUrl();
assertEquals(expectURL, shareURL);
}
@Test
public void getRootDirectoryClient() {
    // The share client must hand out a directory client for the share root of the correct type.
    ShareDirectoryAsyncClient rootDirectory = primaryShareAsyncClient.getRootDirectoryClient();
    assertInstanceOf(ShareDirectoryAsyncClient.class, rootDirectory);
}
@Test
public void getFileClientDoesNotCreateAFile() {
    // Obtaining a file client is a local operation; per the test name it must not create
    // "testFile" on the service.
    ShareFileAsyncClient testFileClient = primaryShareAsyncClient.getFileClient("testFile");
    assertInstanceOf(ShareFileAsyncClient.class, testFileClient);
}
@Test
public void createShare() {
// Bare share creation (no metadata, no quota) must return 201 Created.
StepVerifier.create(primaryShareAsyncClient.createWithResponse(null, (Integer) null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-08-04")
@Test
public void createShareSasError() {
ShareServiceAsyncClient unauthorizedServiceClient = fileServiceBuilderHelper()
.sasToken("sig=dummyToken")
.buildAsyncClient();
ShareAsyncClient share = unauthorizedServiceClient.getShareAsyncClient(generateShareName());
StepVerifier.create(share.create())
.verifyErrorSatisfies(r -> {
ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
assertTrue(e.getServiceMessage().contains("AuthenticationErrorDetail"));
});
}
@ParameterizedTest
@MethodSource("createShareWithArgsSupplier")
public void createShareWithArgs(Map<String, String> metadata, Integer quota) {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(metadata, quota))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
private static Stream<Arguments> createShareWithArgsSupplier() {
// All combinations of optional metadata and optional quota.
// NOTE(review): testMetadata is assigned in the per-instance setup(); JUnit resolves
// @MethodSource before @BeforeEach, so the first resolution may observe null — confirm.
return Stream.of(Arguments.of(null, null),
Arguments.of(null, 1),
Arguments.of(testMetadata, null),
Arguments.of(testMetadata, 1));
}
@ParameterizedTest
@MethodSource("createShareWithInvalidArgsSupplier")
public void createShareWithInvalidArgs(Map<String, String> metadata, Integer quota, int statusCode,
ShareErrorCode errMessage) {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(metadata, quota)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode, errMessage));
}
private static Stream<Arguments> createShareWithInvalidArgsSupplier() {
return Stream.of(
Arguments.of(Collections.singletonMap("", "value"), 1, 400, ShareErrorCode.EMPTY_METADATA_KEY),
Arguments.of(Collections.singletonMap("metadata!", "value"), 1, 400, ShareErrorCode.INVALID_METADATA),
Arguments.of(testMetadata, 6000, 400, ShareErrorCode.INVALID_HEADER_VALUE));
}
@Test
public void createSnapshot() {
primaryShareAsyncClient.create().block();
// NOTE(review): this is a freshly generated name, unrelated to the share actually being
// snapshotted below; the builder-based client therefore only verifies snapshot-id plumbing,
// not that the snapshot is reachable — confirm this is intentional.
String shareSnapshotName = generatePathName();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(null)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
ShareClient shareSnapshotClient = new ShareClientBuilder()
.shareName(shareSnapshotName)
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.snapshot(it.getValue().getSnapshot())
.buildClient();
// The id reported by the client must match the snapshot id returned by the service.
assertEquals(it.getValue().getSnapshot(), shareSnapshotClient.getSnapshotId());
}).verifyComplete();
}
@Test
public void createSnapshotError() {
StepVerifier.create(primaryShareAsyncClient.createSnapshot()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void createSnapshotMetadata() {
primaryShareAsyncClient.create().block();
String shareSnapshotName = generatePathName();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(testMetadata))
.assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
ShareClient shareSnapshotClient = new ShareClientBuilder().shareName(shareSnapshotName)
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.snapshot(it.getValue().getSnapshot()).buildClient();
assertEquals(it.getValue().getSnapshot(), shareSnapshotClient.getSnapshotId());
}).verifyComplete();
}
@Test
public void createSnapshotMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(Collections.singletonMap("", "value")))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createIfNotExistsShare() {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(null, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsShareThatAlreadyExists() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
Response<ShareInfo> initialResponse = client.createIfNotExistsWithResponse(new ShareCreateOptions()).block();
Response<ShareInfo> secondResponse = client.createIfNotExistsWithResponse(new ShareCreateOptions()).block();
assertNotNull(initialResponse);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@ParameterizedTest
@MethodSource("createIfNotExistsShareWithArgsSupplier")
public void createIfNotExistsShareWithArgs(Map<String, String> metadata, Integer quota) {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(
new ShareCreateOptions().setMetadata(metadata).setQuotaInGb(quota)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
private static Stream<Arguments> createIfNotExistsShareWithArgsSupplier() {
return Stream.of(
Arguments.of(null, null),
Arguments.of(null, 1),
Arguments.of(testMetadata, null),
Arguments.of(testMetadata, 1));
}
@ParameterizedTest
@MethodSource("createShareWithInvalidArgsSupplier")
public void createIfNotExistsShareWithInvalidArgs(Map<String, String> metadata, Integer quota, int statusCode,
ShareErrorCode errMessage) {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(
new ShareCreateOptions().setMetadata(metadata).setQuotaInGb(quota)))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode,
errMessage));
}
@Test
public void deleteShare() {
    // Share deletion is asynchronous on the service side and returns 202 (Accepted), not 201 —
    // every sibling delete test in this class asserts 202. The original also never subscribed
    // the StepVerifier (no verifyComplete()), so its assertion was silently skipped.
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.deleteWithResponse())
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
        .verifyComplete();
}
@Test
public void deleteShareError() {
    // Deleting a share that was never created must surface ShareNotFound (404).
    StepVerifier.create(primaryShareAsyncClient.delete())
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void deleteIfExistsShare() {
    // Mirrors deleteShare: deletion of an existing share is accepted with 202, and the verifier
    // must be subscribed via verifyComplete() for the assertion to execute at all (the original
    // asserted 201 and never subscribed the chain).
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.deleteIfExistsWithResponse(null))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
        .verifyComplete();
}
@Test
public void deleteIfExistsShareThatDoesNotExist() {
// deleteIfExists on a never-created share: no exception, value=false, raw status 404, and the
// share still does not exist afterwards.
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
assertNotEquals(Boolean.TRUE, client.exists().block());
}
@Test
public void deleteIfExistsDirectoryThatWasAlreadyDeleted() {
// NOTE(review): despite the name, this test deletes the SHARE twice, not a directory —
// consider renaming to deleteIfExistsShareThatWasAlreadyDeleted.
primaryShareAsyncClient.create().block();
Response<Boolean> initialResponse = primaryShareAsyncClient.deleteIfExistsWithResponse(null, null).block();
// Share deletion completes asynchronously server-side; wait it out when running live.
sleepIfRunningAgainstService(45000);
Response<Boolean> secondResponse = primaryShareAsyncClient.deleteIfExistsWithResponse(null, null).block();
assertNotNull(initialResponse);
assertNotNull(secondResponse);
// First delete is accepted (202, value=true); the repeat is a no-op (404, value=false).
FileShareTestHelper.assertResponseStatusCode(initialResponse, 202);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 404);
assertTrue(initialResponse.getValue());
assertFalse(secondResponse.getValue());
}
@Test
public void getProperties() {
    // Create the share with known metadata and a 1 GB quota, then verify both round-trip
    // through getPropertiesWithResponse.
    primaryShareAsyncClient.createWithResponse(testMetadata, 1).block();
    StepVerifier.create(primaryShareAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
        FileShareTestHelper.assertResponseStatusCode(it, 200);
        assertEquals(testMetadata, it.getValue().getMetadata());
        // JUnit's assertEquals takes (expected, actual); the original had them swapped,
        // which produces misleading failure messages.
        assertEquals(1, it.getValue().getQuota());
    }).verifyComplete();
}
@Test
public void getPropertiesError() {
StepVerifier.create(primaryShareAsyncClient.getProperties()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@PlaybackOnly
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void getPropertiesPremium(String protocol, ShareRootSquash rootSquash) {
ShareProtocols enabledProtocol = ModelHelper.parseShareProtocols(protocol);
ShareAsyncClient premiumShare = Objects.requireNonNull(
premiumFileServiceAsyncClient.createShareWithResponse(generateShareName(), new ShareCreateOptions()
.setMetadata(testMetadata).setProtocols(enabledProtocol).setRootSquash(rootSquash), null)
.block()).getValue();
StepVerifier.create(premiumShare.getPropertiesWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertNotNull(it.getValue());
assertEquals(testMetadata, it.getValue().getMetadata());
assertNotNull(it.getValue().getProvisionedIops());
assertNotNull(it.getValue().getProvisionedBandwidthMiBps());
assertNotNull(it.getValue().getNextAllowedQuotaDowngradeTime());
assertEquals(enabledProtocol.toString(), it.getValue().getProtocols().toString());
assertEquals(rootSquash, it.getValue().getRootSquash());
}).verifyComplete();
}
@PlaybackOnly
@Test
public void setPremiumProperties() {
List<ShareRootSquash> rootSquashes = Arrays.asList(
ShareRootSquash.ALL_SQUASH,
ShareRootSquash.NO_ROOT_SQUASH,
ShareRootSquash.ROOT_SQUASH);
for (ShareRootSquash rootSquash : rootSquashes) {
ShareAsyncClient premiumShareClient = Objects.requireNonNull(
premiumFileServiceAsyncClient.createShareWithResponse(generateShareName(),
new ShareCreateOptions().setProtocols(new ShareProtocols().setNfsEnabled(true)), null)
.block()).getValue();
premiumShareClient.setProperties(new ShareSetPropertiesOptions().setRootSquash(rootSquash)).block();
StepVerifier.create(premiumShareClient.getProperties()).assertNext(it ->
assertEquals(rootSquash, it.getRootSquash())).verifyComplete();
}
}
@Test
public void setQuota() {
    // Start at a 1 GB quota, raise it to 2 GB, and confirm the service reflects the change.
    primaryShareAsyncClient.createWithResponse(null, 1).block();
    StepVerifier.create(primaryShareAsyncClient.getProperties())
        // assertEquals takes (expected, actual); the original had the arguments swapped.
        .assertNext(it -> assertEquals(1, it.getQuota()))
        .verifyComplete();
    StepVerifier.create(primaryShareAsyncClient.setQuotaWithResponse(2))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200))
        .verifyComplete();
    StepVerifier.create(primaryShareAsyncClient.getProperties())
        .assertNext(it -> assertEquals(2, it.getQuota()))
        .verifyComplete();
}
@Test
public void setQuotaError() {
    // Setting a quota on a nonexistent share must surface ShareNotFound (404).
    StepVerifier.create(primaryShareAsyncClient.setQuota(2))
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void setMetadata() {
primaryShareAsyncClient.createWithResponse(testMetadata, null).block();
Map<String, String> metadataAfterSet = Collections.singletonMap("afterset", "value");
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(testMetadata, it.getMetadata())).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.setMetadataWithResponse(metadataAfterSet))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(metadataAfterSet, it.getMetadata())).verifyComplete();
}
@Test
public void setMetadataError() {
StepVerifier.create(primaryShareAsyncClient.setMetadata(testMetadata)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void getStatistics(long size, int gigabytes) {
// Creates a file of `size` bytes and checks the reported usage in bytes and in GB.
// NOTE(review): the @MethodSource string above is truncated in this copy — restore the full
// "com.azure.storage.file.share.FileShareTestHelper#<supplier>" reference before compiling.
// NOTE(review): both assertEquals calls below pass arguments as (actual, expected); JUnit
// expects (expected, actual), so failure messages would be misleading.
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile("tempFile", size).block();
StepVerifier.create(primaryShareAsyncClient.getStatisticsWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertEquals(it.getValue().getShareUsageInBytes(), size);
assertEquals(it.getValue().getShareUsageInGB(), gigabytes);
}).verifyComplete();
}
@Test
public void getStatisticsError() {
    // Statistics for a nonexistent share must surface ShareNotFound (404).
    StepVerifier.create(primaryShareAsyncClient.getStatistics())
        .verifyErrorSatisfies(error ->
            FileShareTestHelper.assertExceptionStatusCodeAndMessage(error, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void createDirectory() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null, null,
null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createDirectoryInvalidName() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectory("test/directory")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.PARENT_NOT_FOUND));
}
@Test
public void createDirectoryMetadata() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null, null,
testMetadata)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createDirectoryFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null,
FILE_PERMISSION, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createDirectoryFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", smbProperties,
null, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createDirectoryMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testdirectory", null, null,
Collections.singletonMap("", "value"))).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createIfNotExistsDirectory() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions())).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createIfNotExistsDirectoryThatAlreadyExists() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
client.create().block();
Response<ShareDirectoryAsyncClient> initialResponse = client.createDirectoryIfNotExistsWithResponse(
"testCreateDirectory", new ShareDirectoryCreateOptions()).block();
Response<ShareDirectoryAsyncClient> secondResponse = client.createDirectoryIfNotExistsWithResponse(
"testCreateDirectory", new ShareDirectoryCreateOptions()).block();
assertNotNull(initialResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@Test
public void createIfNotExistsDirectoryInvalidName() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExists("test/directory"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.PARENT_NOT_FOUND));
}
@Test
public void createIfNotExistsDirectoryMetadata() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setMetadata(testMetadata)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setSmbProperties(smbProperties)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testdirectory",
new ShareDirectoryCreateOptions().setMetadata(Collections.singletonMap("", "value"))))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createFile() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createFileFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null,
FILE_PERMISSION, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createFileFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, smbProperties,
null, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileInvalidArgs(String fileName, long maxSize, int statusCode, ShareErrorCode errMsg) {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse(fileName, maxSize, null, null, null, null))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode,
errMsg));
}
@Test
public void createFileLease() {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.getFileClient("testCreateFile").create(512).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient("testCreateFile")).acquireLease()
.block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null, new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void createFileLeaseFail() {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.getFileClient("testCreateFile").create(512).block();
createLeaseClient(primaryShareAsyncClient.getFileClient("testCreateFile")).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
@Test
public void createFileMaxOverload() {
primaryShareAsyncClient.create().block();
ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("txt");
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, httpHeaders,
smbProperties, FILE_PERMISSION, testMetadata)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileMaxOverloadInvalidArgs(String fileName, long maxSize, ShareFileHttpHeaders httpHeaders,
Map<String, String> metadata, ShareErrorCode errMsg) {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse(fileName, maxSize, httpHeaders, null, null,
metadata)).verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
errMsg));
}
// Deleting an existing directory returns 202 Accepted.
@Test
public void deleteDirectory() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createDirectory(directoryName).block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectoryWithResponse(directoryName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Deleting a directory that was never created fails with 404 ResourceNotFound.
@Test
public void deleteDirectoryError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectory("testdirectory"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
// deleteDirectoryIfExists on an existing directory behaves like a plain delete: 202 Accepted.
@Test
public void deleteIfExistsDirectory() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createDirectory(directoryName).block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectoryIfExistsWithResponse(directoryName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// deleteDirectoryIfExists on a missing directory must not error: it reports 404 in the raw
// response and false as the "was it deleted" value.
@Test
public void deleteIfExistsDirectoryThatDoesNotExist() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
Response<Boolean> response = primaryShareAsyncClient.deleteDirectoryIfExistsWithResponse(directoryName).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
}
// Deleting an existing file returns 202 Accepted.
@Test
public void deleteFile() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Deleting a leased file succeeds when the request carries the active lease id.
@Test
public void deleteFileLease() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// Deleting a leased file with a random (wrong) lease id must be rejected.
@Test
public void deleteFileLeaseFail() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
// The real lease id is intentionally discarded; a fresh UUID is sent instead.
createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// Deleting a file that was never created fails with 404 ResourceNotFound.
@Test
public void deleteFileError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.deleteFile("testdirectory")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
// deleteFileIfExists on an existing file behaves like a plain delete: 202 Accepted.
@Test
public void deleteIfExistsFile() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// deleteFileIfExists on a leased file succeeds when the active lease id is supplied.
@Test
public void deleteIfExistsFileLease() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// deleteFileIfExists on a missing file must not error: 404 in the raw response, value=false.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
Response<Boolean> response = primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName, null).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
}
// deleteFileIfExists on a leased file with a wrong lease id must be rejected.
@Test
public void deleteIfExistsFileLeaseFail() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// Creating a share-level permission from an SDDL string returns 201 Created.
@Test
public void createPermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createPermissionWithResponse(FILE_PERMISSION))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
// A permission key returned by createPermission can be round-tripped through getPermission (200).
@Test
public void createAndGetPermission() {
primaryShareAsyncClient.create().block();
String filePermissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
StepVerifier.create(primaryShareAsyncClient.getPermissionWithResponse(filePermissionKey))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
}
// A malformed SDDL string is rejected with 400 FileInvalidPermission.
@Test
public void createPermissionError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createPermissionWithResponse("abcde")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.fromString("FileInvalidPermission")));
}
// A malformed permission key is rejected with 400 InvalidHeaderValue.
@Test
public void getPermissionError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.getPermissionWithResponse("abcde"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.INVALID_HEADER_VALUE));
}
// A snapshot-scoped client must report exactly the snapshot timestamp it was built with.
@Test
public void getSnapshotId() {
OffsetDateTime snapshotTime = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC);
String snapshot = snapshotTime.toString();
ShareAsyncClient shareSnapshotClient = shareBuilderHelper(shareName).snapshot(snapshot).buildAsyncClient();
assertEquals(snapshot, shareSnapshotClient.getSnapshotId());
}
// The client must echo back the share name it was created with.
@Test
public void getShareName() {
String reportedName = primaryShareAsyncClient.getShareName();
assertEquals(shareName, reportedName);
}
// audience(null) must fall back to the default audience; the OAuth-authenticated call
// still succeeds against the service.
@Test
public void defaultAudience() {
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder().shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP))
.audience(null)
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
// An audience explicitly scoped to this storage account authenticates successfully.
@Test
public void storageAccountAudience() {
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience(primaryShareAsyncClient.getAccountName()))
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
// A token scoped to the wrong audience must be rejected with AuthenticationFailed.
@Test
public void audienceError() {
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience("badaudience"))
.buildAsyncClient()
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.verifyErrorSatisfies(r -> {
ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
});
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience)
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
// Creating an NFS share with the snapshot-virtual-directory-access flag set true/false/unset:
// the service must report the flag back, with "unset" treated as enabled.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-05-04")
@ParameterizedTest
@MethodSource("createEnableSnapshotVirtualDirectoryAccessSupplier")
public void createEnableSnapshotVirtualDirectoryAccess(Boolean enableSnapshotVirtualDirectoryAccess) {
ShareCreateOptions options = new ShareCreateOptions();
ShareProtocols protocols = ModelHelper.parseShareProtocols(Constants.HeaderConstants.NFS_PROTOCOL);
options.setProtocols(protocols);
options.setSnapshotVirtualDirectoryAccessEnabled(enableSnapshotVirtualDirectoryAccess);
primaryShareAsyncClient.createWithResponse(options).block();
StepVerifier.create(primaryShareAsyncClient.getProperties())
.assertNext(r -> {
assertEquals(protocols.toString(), r.getProtocols().toString());
// null (not specified) defaults to enabled on the service side.
if (enableSnapshotVirtualDirectoryAccess == null || enableSnapshotVirtualDirectoryAccess) {
assertTrue(r.isEnableSnapshotVirtualDirectoryAccess());
} else {
assertFalse(r.isEnableSnapshotVirtualDirectoryAccess());
}
})
.verifyComplete();
}
// Supplies the three possible values for the snapshot-virtual-directory-access flag:
// explicitly enabled, explicitly disabled, and unset (service default).
private static Stream<Arguments> createEnableSnapshotVirtualDirectoryAccessSupplier() {
Boolean[] flagValues = {Boolean.TRUE, Boolean.FALSE, null};
return Arrays.stream(flagValues).map(Arguments::of);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-08-04")
@ParameterizedTest
@MethodSource("createEnableSnapshotVirtualDirectoryAccessSupplier")
} |
Yep, I'm going to go back through all of file shares to remove the blocking once I'm done with STG94 and the blob blocking removal. | public void setPropertiesEnableSnapshotVirtualDirectoryAccess(Boolean enableSnapshotVirtualDirectoryAccess) {
ShareCreateOptions options = new ShareCreateOptions();
ShareProtocols protocols = ModelHelper.parseShareProtocols(Constants.HeaderConstants.NFS_PROTOCOL);
options.setProtocols(protocols);
primaryShareAsyncClient.createWithResponse(options).block();
ShareSetPropertiesOptions setPropertiesOptions = new ShareSetPropertiesOptions();
setPropertiesOptions.setSnapshotVirtualDirectoryAccessEnabled(enableSnapshotVirtualDirectoryAccess);
setPropertiesOptions.setAccessTier(ShareAccessTier.TRANSACTION_OPTIMIZED);
primaryShareAsyncClient.setProperties(setPropertiesOptions).block();
StepVerifier.create(primaryShareAsyncClient.getProperties())
.assertNext(r -> {
assertEquals(protocols.toString(), r.getProtocols().toString());
if (enableSnapshotVirtualDirectoryAccess == null || enableSnapshotVirtualDirectoryAccess) {
assertTrue(r.isEnableSnapshotVirtualDirectoryAccess());
} else {
assertFalse(r.isEnableSnapshotVirtualDirectoryAccess());
}
})
.verifyComplete();
} | primaryShareAsyncClient.createWithResponse(options).block(); | public void setPropertiesEnableSnapshotVirtualDirectoryAccess(Boolean enableSnapshotVirtualDirectoryAccess) {
ShareCreateOptions options = new ShareCreateOptions();
ShareProtocols protocols = ModelHelper.parseShareProtocols(Constants.HeaderConstants.NFS_PROTOCOL);
options.setProtocols(protocols);
primaryShareAsyncClient.createWithResponse(options).block();
ShareSetPropertiesOptions setPropertiesOptions = new ShareSetPropertiesOptions();
setPropertiesOptions.setSnapshotVirtualDirectoryAccessEnabled(enableSnapshotVirtualDirectoryAccess);
setPropertiesOptions.setAccessTier(ShareAccessTier.TRANSACTION_OPTIMIZED);
primaryShareAsyncClient.setProperties(setPropertiesOptions).block();
StepVerifier.create(primaryShareAsyncClient.getProperties())
.assertNext(r -> {
assertEquals(protocols.toString(), r.getProtocols().toString());
if (enableSnapshotVirtualDirectoryAccess == null || enableSnapshotVirtualDirectoryAccess) {
assertTrue(r.isEnableSnapshotVirtualDirectoryAccess());
} else {
assertFalse(r.isEnableSnapshotVirtualDirectoryAccess());
}
})
.verifyComplete();
} | class ShareAsyncApiTests extends FileShareTestBase {
private ShareAsyncClient primaryShareAsyncClient;
private String shareName;
private static Map<String, String> testMetadata;
private FileSmbProperties smbProperties;
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
// Per-test fixture: a fresh share name and clients. The share itself is NOT created here;
// each test creates it explicitly so creation failures surface in the test that needs it.
@BeforeEach
public void setup() {
shareName = generateShareName();
primaryFileServiceAsyncClient = fileServiceBuilderHelper().buildAsyncClient();
primaryShareAsyncClient = primaryFileServiceAsyncClient.getShareAsyncClient(shareName);
testMetadata = Collections.singletonMap("testmetadata", "value");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes>of(NtfsFileAttributes.NORMAL));
}
@Test
public void getShareURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
String shareURL = primaryShareAsyncClient.getShareUrl();
assertEquals(expectURL, shareURL);
}
@Test
public void getRootDirectoryClient() {
ShareDirectoryAsyncClient directoryClient = primaryShareAsyncClient.getRootDirectoryClient();
assertInstanceOf(ShareDirectoryAsyncClient.class, directoryClient);
}
@Test
public void getFileClientDoesNotCreateAFile() {
ShareFileAsyncClient fileClient = primaryShareAsyncClient.getFileClient("testFile");
assertInstanceOf(ShareFileAsyncClient.class, fileClient);
}
@Test
public void createShare() {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(null, (Integer) null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-08-04")
@Test
public void createShareSasError() {
ShareServiceAsyncClient unauthorizedServiceClient = fileServiceBuilderHelper()
.sasToken("sig=dummyToken")
.buildAsyncClient();
ShareAsyncClient share = unauthorizedServiceClient.getShareAsyncClient(generateShareName());
StepVerifier.create(share.create())
.verifyErrorSatisfies(r -> {
ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
assertTrue(e.getServiceMessage().contains("AuthenticationErrorDetail"));
});
}
@ParameterizedTest
@MethodSource("createShareWithArgsSupplier")
public void createShareWithArgs(Map<String, String> metadata, Integer quota) {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(metadata, quota))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
private static Stream<Arguments> createShareWithArgsSupplier() {
return Stream.of(Arguments.of(null, null),
Arguments.of(null, 1),
Arguments.of(testMetadata, null),
Arguments.of(testMetadata, 1));
}
@ParameterizedTest
@MethodSource("createShareWithInvalidArgsSupplier")
public void createShareWithInvalidArgs(Map<String, String> metadata, Integer quota, int statusCode,
ShareErrorCode errMessage) {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(metadata, quota)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode, errMessage));
}
private static Stream<Arguments> createShareWithInvalidArgsSupplier() {
return Stream.of(
Arguments.of(Collections.singletonMap("", "value"), 1, 400, ShareErrorCode.EMPTY_METADATA_KEY),
Arguments.of(Collections.singletonMap("metadata!", "value"), 1, 400, ShareErrorCode.INVALID_METADATA),
Arguments.of(testMetadata, 6000, 400, ShareErrorCode.INVALID_HEADER_VALUE));
}
// Taking a snapshot returns 201, and a client built with the returned snapshot id reports it.
@Test
public void createSnapshot() {
primaryShareAsyncClient.create().block();
String shareSnapshotName = generatePathName();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(null)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
ShareClient shareSnapshotClient = new ShareClientBuilder()
.shareName(shareSnapshotName)
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.snapshot(it.getValue().getSnapshot())
.buildClient();
assertEquals(it.getValue().getSnapshot(), shareSnapshotClient.getSnapshotId());
}).verifyComplete();
}
@Test
public void createSnapshotError() {
StepVerifier.create(primaryShareAsyncClient.createSnapshot()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void createSnapshotMetadata() {
primaryShareAsyncClient.create().block();
String shareSnapshotName = generatePathName();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(testMetadata))
.assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
ShareClient shareSnapshotClient = new ShareClientBuilder().shareName(shareSnapshotName)
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.snapshot(it.getValue().getSnapshot()).buildClient();
assertEquals(it.getValue().getSnapshot(), shareSnapshotClient.getSnapshotId());
}).verifyComplete();
}
@Test
public void createSnapshotMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(Collections.singletonMap("", "value")))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createIfNotExistsShare() {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(null, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsShareThatAlreadyExists() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
Response<ShareInfo> initialResponse = client.createIfNotExistsWithResponse(new ShareCreateOptions()).block();
Response<ShareInfo> secondResponse = client.createIfNotExistsWithResponse(new ShareCreateOptions()).block();
assertNotNull(initialResponse);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@ParameterizedTest
@MethodSource("createIfNotExistsShareWithArgsSupplier")
public void createIfNotExistsShareWithArgs(Map<String, String> metadata, Integer quota) {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(
new ShareCreateOptions().setMetadata(metadata).setQuotaInGb(quota)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
private static Stream<Arguments> createIfNotExistsShareWithArgsSupplier() {
return Stream.of(
Arguments.of(null, null),
Arguments.of(null, 1),
Arguments.of(testMetadata, null),
Arguments.of(testMetadata, 1));
}
@ParameterizedTest
@MethodSource("createShareWithInvalidArgsSupplier")
public void createIfNotExistsShareWithInvalidArgs(Map<String, String> metadata, Integer quota, int statusCode,
ShareErrorCode errMessage) {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(
new ShareCreateOptions().setMetadata(metadata).setQuotaInGb(quota)))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode,
errMessage));
}
// Deleting an existing share succeeds.
@Test
public void deleteShare() {
primaryShareAsyncClient.create().block();
// Fixes two defects in the original: (1) the StepVerifier was never subscribed — without
// a terminal verify*() call the assertNext() callback never runs; (2) Delete Share
// returns 202 Accepted, not 201, matching every other delete test in this class.
StepVerifier.create(primaryShareAsyncClient.deleteWithResponse())
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
.verifyComplete();
}
@Test
public void deleteShareError() {
StepVerifier.create(primaryShareAsyncClient.delete())
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.SHARE_NOT_FOUND));
}
// deleteIfExists on an existing share behaves like a plain delete.
@Test
public void deleteIfExistsShare() {
primaryShareAsyncClient.create().block();
// Fixes two defects in the original: (1) missing verifyComplete() meant the verifier was
// never subscribed, so the assertion never executed; (2) share deletion returns 202
// Accepted, not 201, matching the sibling delete tests.
StepVerifier.create(primaryShareAsyncClient.deleteIfExistsWithResponse(null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202))
.verifyComplete();
}
@Test
public void deleteIfExistsShareThatDoesNotExist() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
assertNotEquals(Boolean.TRUE, client.exists().block());
}
// Two consecutive deleteIfExists calls on the same share: first deletes (202, true),
// second finds nothing (404, false).
@Test
public void deleteIfExistsDirectoryThatWasAlreadyDeleted() {
primaryShareAsyncClient.create().block();
Response<Boolean> initialResponse = primaryShareAsyncClient.deleteIfExistsWithResponse(null, null).block();
// Give the service time to finish the asynchronous deletion before retrying
// (no-op in playback mode) — presumably covers deletion propagation; confirm.
sleepIfRunningAgainstService(45000);
Response<Boolean> secondResponse = primaryShareAsyncClient.deleteIfExistsWithResponse(null, null).block();
assertNotNull(initialResponse);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 202);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 404);
assertTrue(initialResponse.getValue());
assertFalse(secondResponse.getValue());
}
@Test
public void getProperties() {
primaryShareAsyncClient.createWithResponse(testMetadata, 1).block();
StepVerifier.create(primaryShareAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertEquals(testMetadata, it.getValue().getMetadata());
assertEquals(it.getValue().getQuota(), 1);
}).verifyComplete();
}
@Test
public void getPropertiesError() {
StepVerifier.create(primaryShareAsyncClient.getProperties()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@PlaybackOnly
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void getPropertiesPremium(String protocol, ShareRootSquash rootSquash) {
ShareProtocols enabledProtocol = ModelHelper.parseShareProtocols(protocol);
ShareAsyncClient premiumShare = Objects.requireNonNull(
premiumFileServiceAsyncClient.createShareWithResponse(generateShareName(), new ShareCreateOptions()
.setMetadata(testMetadata).setProtocols(enabledProtocol).setRootSquash(rootSquash), null)
.block()).getValue();
StepVerifier.create(premiumShare.getPropertiesWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertNotNull(it.getValue());
assertEquals(testMetadata, it.getValue().getMetadata());
assertNotNull(it.getValue().getProvisionedIops());
assertNotNull(it.getValue().getProvisionedBandwidthMiBps());
assertNotNull(it.getValue().getNextAllowedQuotaDowngradeTime());
assertEquals(enabledProtocol.toString(), it.getValue().getProtocols().toString());
assertEquals(rootSquash, it.getValue().getRootSquash());
}).verifyComplete();
}
// Each root-squash mode set via setProperties must be reported back by getProperties
// on a premium NFS share.
@PlaybackOnly
@Test
public void setPremiumProperties() {
List<ShareRootSquash> rootSquashes = Arrays.asList(
ShareRootSquash.ALL_SQUASH,
ShareRootSquash.NO_ROOT_SQUASH,
ShareRootSquash.ROOT_SQUASH);
for (ShareRootSquash rootSquash : rootSquashes) {
// A fresh NFS share per mode so the settings cannot interfere with each other.
ShareAsyncClient premiumShareClient = Objects.requireNonNull(
premiumFileServiceAsyncClient.createShareWithResponse(generateShareName(),
new ShareCreateOptions().setProtocols(new ShareProtocols().setNfsEnabled(true)), null)
.block()).getValue();
premiumShareClient.setProperties(new ShareSetPropertiesOptions().setRootSquash(rootSquash)).block();
StepVerifier.create(premiumShareClient.getProperties()).assertNext(it ->
assertEquals(rootSquash, it.getRootSquash())).verifyComplete();
}
}
@Test
public void setQuota() {
primaryShareAsyncClient.createWithResponse(null, 1).block();
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(it.getQuota(), 1)).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.setQuotaWithResponse(2)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(it.getQuota(), 2)).verifyComplete();
}
@Test
public void setQuotaError() {
StepVerifier.create(primaryShareAsyncClient.setQuota(2)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void setMetadata() {
primaryShareAsyncClient.createWithResponse(testMetadata, null).block();
Map<String, String> metadataAfterSet = Collections.singletonMap("afterset", "value");
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(testMetadata, it.getMetadata())).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.setMetadataWithResponse(metadataAfterSet))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(metadataAfterSet, it.getMetadata())).verifyComplete();
}
@Test
public void setMetadataError() {
StepVerifier.create(primaryShareAsyncClient.setMetadata(testMetadata)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void getStatistics(long size, int gigabytes) {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile("tempFile", size).block();
StepVerifier.create(primaryShareAsyncClient.getStatisticsWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertEquals(it.getValue().getShareUsageInBytes(), size);
assertEquals(it.getValue().getShareUsageInGB(), gigabytes);
}).verifyComplete();
}
@Test
public void getStatisticsError() {
StepVerifier.create(primaryShareAsyncClient.getStatistics()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void createDirectory() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null, null,
null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createDirectoryInvalidName() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectory("test/directory")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.PARENT_NOT_FOUND));
}
@Test
public void createDirectoryMetadata() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null, null,
testMetadata)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createDirectoryFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null,
FILE_PERMISSION, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createDirectoryFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", smbProperties,
null, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createDirectoryMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testdirectory", null, null,
Collections.singletonMap("", "value"))).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createIfNotExistsDirectory() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions())).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createIfNotExistsDirectoryThatAlreadyExists() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
client.create().block();
Response<ShareDirectoryAsyncClient> initialResponse = client.createDirectoryIfNotExistsWithResponse(
"testCreateDirectory", new ShareDirectoryCreateOptions()).block();
Response<ShareDirectoryAsyncClient> secondResponse = client.createDirectoryIfNotExistsWithResponse(
"testCreateDirectory", new ShareDirectoryCreateOptions()).block();
assertNotNull(initialResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@Test
public void createIfNotExistsDirectoryInvalidName() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExists("test/directory"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.PARENT_NOT_FOUND));
}
@Test
public void createIfNotExistsDirectoryMetadata() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setMetadata(testMetadata)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
new ShareDirectoryCreateOptions().setSmbProperties(smbProperties)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
// Metadata with an empty key is invalid; the service responds 400 EmptyMetadataKey.
@Test
public void createIfNotExistsDirectoryMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testdirectory",
new ShareDirectoryCreateOptions().setMetadata(Collections.singletonMap("", "value"))))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
// Baseline file creation: a 1 KiB file with no headers/properties/metadata returns 201 Created.
@Test
public void createFile() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
// File creation with an inline SDDL file permission should succeed with 201 Created.
@Test
public void createFileFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null,
FILE_PERMISSION, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
// Creates a share-level permission, then creates a file referencing it by key via SMB properties;
// expects 201 Created.
@Test
public void createFileFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, smbProperties,
null, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileInvalidArgs(String fileName, long maxSize, int statusCode, ShareErrorCode errMsg) {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse(fileName, maxSize, null, null, null, null))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode,
errMsg));
}
// Overwriting a leased file succeeds when the correct lease id is passed in the request conditions.
@Test
public void createFileLease() {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.getFileClient("testCreateFile").create(512).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient("testCreateFile")).acquireLease()
.block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null, new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// Overwriting a leased file with a random (wrong) lease id must fail with a ShareStorageException.
@Test
public void createFileLeaseFail() {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.getFileClient("testCreateFile").create(512).block();
createLeaseClient(primaryShareAsyncClient.getFileClient("testCreateFile")).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// Exercises the fullest createFileWithResponse overload: HTTP headers, SMB properties,
// file permission, and metadata all at once; expects 201 Created.
@Test
public void createFileMaxOverload() {
primaryShareAsyncClient.create().block();
ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("txt");
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now());
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, httpHeaders,
smbProperties, FILE_PERMISSION, testMetadata)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileMaxOverloadInvalidArgs(String fileName, long maxSize, ShareFileHttpHeaders httpHeaders,
Map<String, String> metadata, ShareErrorCode errMsg) {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse(fileName, maxSize, httpHeaders, null, null,
metadata)).verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
errMsg));
}
// Deleting an existing directory returns 202 Accepted.
@Test
public void deleteDirectory() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createDirectory(directoryName).block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectoryWithResponse(directoryName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Deleting a directory that was never created fails with 404 ResourceNotFound.
@Test
public void deleteDirectoryError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectory("testdirectory"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.RESOURCE_NOT_FOUND));
}
// deleteDirectoryIfExists on an existing directory behaves like delete: 202 Accepted.
@Test
public void deleteIfExistsDirectory() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createDirectory(directoryName).block();
StepVerifier.create(primaryShareAsyncClient.deleteDirectoryIfExistsWithResponse(directoryName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// deleteDirectoryIfExists on a missing directory does not error: it returns a 404 response
// whose Boolean value is false (nothing was deleted).
@Test
public void deleteIfExistsDirectoryThatDoesNotExist() {
String directoryName = "testCreateDirectory";
primaryShareAsyncClient.create().block();
Response<Boolean> response = primaryShareAsyncClient.deleteDirectoryIfExistsWithResponse(directoryName).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
}
// Deleting an existing file returns 202 Accepted.
@Test
public void deleteFile() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// Deleting a leased file succeeds when the correct lease id is supplied.
@Test
public void deleteFileLease() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// Deleting a leased file with a random (wrong) lease id must fail with a ShareStorageException.
@Test
public void deleteFileLeaseFail() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// Deleting a file that was never created fails with 404 ResourceNotFound.
@Test
public void deleteFileError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.deleteFile("testdirectory")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
// deleteFileIfExists on an existing file behaves like delete: 202 Accepted.
@Test
public void deleteIfExistsFile() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
// deleteFileIfExists on a leased file succeeds when the correct lease id is supplied.
@Test
public void deleteIfExistsFileLease() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName,
new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
// deleteFileIfExists on a missing file does not error: it returns a 404 response whose
// Boolean value is false (nothing was deleted).
@Test
public void deleteIfExistsFileThatDoesNotExist() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
Response<Boolean> response = primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName, null).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
}
// deleteFileIfExists on a leased file with a wrong lease id must fail with a ShareStorageException.
@Test
public void deleteIfExistsFileLeaseFail() {
String fileName = "testCreateFile";
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile(fileName, 1024).block();
createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName,
new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
.verifyError(ShareStorageException.class);
}
// Creating a share-level SDDL permission returns 201 Created.
@Test
public void createPermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createPermissionWithResponse(FILE_PERMISSION))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
// Round-trip: create a permission, then fetch it back by the key the service returned (200 OK).
@Test
public void createAndGetPermission() {
primaryShareAsyncClient.create().block();
String filePermissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
StepVerifier.create(primaryShareAsyncClient.getPermissionWithResponse(filePermissionKey))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
}
// A string that is not valid SDDL is rejected with 400 FileInvalidPermission.
@Test
public void createPermissionError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createPermissionWithResponse("abcde")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.fromString("FileInvalidPermission")));
}
// Looking up a permission by a malformed key fails with 400 InvalidHeaderValue.
@Test
public void getPermissionError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.getPermissionWithResponse("abcde"))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.INVALID_HEADER_VALUE));
}
// A client built with an explicit snapshot timestamp must report that timestamp as its snapshot id.
// Purely local (no service call), so no share creation is needed.
@Test
public void getSnapshotId() {
String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
ShareAsyncClient shareSnapshotClient = shareBuilderHelper(shareName).snapshot(snapshot).buildAsyncClient();
assertEquals(snapshot, shareSnapshotClient.getSnapshotId());
}
// The client must echo back the share name it was constructed with (no service call).
@Test
public void getShareName() {
assertEquals(shareName, primaryShareAsyncClient.getShareName());
}
// With audience explicitly set to null, the OAuth client falls back to the default audience
// and can still create a permission against the service.
@Test
public void defaultAudience() {
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder().shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP))
.audience(null)
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
// An audience scoped to this specific storage account should authenticate successfully.
@Test
public void storageAccountAudience() {
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience(primaryShareAsyncClient.getAccountName()))
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
// An audience naming a different (bad) account must be rejected by the service with
// AuthenticationFailed.
@Test
public void audienceError() {
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(ShareAudience.createShareServiceAccountAudience("badaudience"))
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.verifyErrorSatisfies(r -> {
ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
});
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience)
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
// For an NFS share, verifies that the snapshot-virtual-directory-access flag round-trips through
// create/getProperties. A null flag defaults to enabled on the service side.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-05-04")
@ParameterizedTest
@MethodSource("createEnableSnapshotVirtualDirectoryAccessSupplier")
public void createEnableSnapshotVirtualDirectoryAccess(Boolean enableSnapshotVirtualDirectoryAccess) {
ShareCreateOptions options = new ShareCreateOptions();
ShareProtocols protocols = ModelHelper.parseShareProtocols(Constants.HeaderConstants.NFS_PROTOCOL);
options.setProtocols(protocols);
options.setSnapshotVirtualDirectoryAccessEnabled(enableSnapshotVirtualDirectoryAccess);
primaryShareAsyncClient.createWithResponse(options).block();
StepVerifier.create(primaryShareAsyncClient.getProperties())
.assertNext(r -> {
assertEquals(protocols.toString(), r.getProtocols().toString());
// null or true -> access enabled; only an explicit false disables it.
if (enableSnapshotVirtualDirectoryAccess == null || enableSnapshotVirtualDirectoryAccess) {
assertTrue(r.isEnableSnapshotVirtualDirectoryAccess());
} else {
assertFalse(r.isEnableSnapshotVirtualDirectoryAccess());
}
})
.verifyComplete();
}
// Supplies the three possible flag values for the snapshot-virtual-directory-access tests:
// explicitly enabled, explicitly disabled, and unset (null).
private static Stream<Arguments> createEnableSnapshotVirtualDirectoryAccessSupplier() {
    Boolean[] flags = {Boolean.TRUE, Boolean.FALSE, null};
    return Arrays.stream(flags).map(Arguments::of);
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-08-04")
@ParameterizedTest
@MethodSource("createEnableSnapshotVirtualDirectoryAccessSupplier")
} | class ShareAsyncApiTests extends FileShareTestBase {
private ShareAsyncClient primaryShareAsyncClient;
private String shareName;
private static Map<String, String> testMetadata;
private FileSmbProperties smbProperties;
private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL";
@BeforeEach
public void setup() {
shareName = generateShareName();
primaryFileServiceAsyncClient = fileServiceBuilderHelper().buildAsyncClient();
primaryShareAsyncClient = primaryFileServiceAsyncClient.getShareAsyncClient(shareName);
testMetadata = Collections.singletonMap("testmetadata", "value");
smbProperties = new FileSmbProperties().setNtfsFileAttributes(EnumSet.<NtfsFileAttributes>of(NtfsFileAttributes.NORMAL));
}
@Test
public void getShareURL() {
String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount()
.getConnectionString()).getAccountName();
String expectURL = String.format("https:
String shareURL = primaryShareAsyncClient.getShareUrl();
assertEquals(expectURL, shareURL);
}
@Test
public void getRootDirectoryClient() {
ShareDirectoryAsyncClient directoryClient = primaryShareAsyncClient.getRootDirectoryClient();
assertInstanceOf(ShareDirectoryAsyncClient.class, directoryClient);
}
@Test
public void getFileClientDoesNotCreateAFile() {
ShareFileAsyncClient fileClient = primaryShareAsyncClient.getFileClient("testFile");
assertInstanceOf(ShareFileAsyncClient.class, fileClient);
}
@Test
public void createShare() {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(null, (Integer) null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-08-04")
@Test
public void createShareSasError() {
ShareServiceAsyncClient unauthorizedServiceClient = fileServiceBuilderHelper()
.sasToken("sig=dummyToken")
.buildAsyncClient();
ShareAsyncClient share = unauthorizedServiceClient.getShareAsyncClient(generateShareName());
StepVerifier.create(share.create())
.verifyErrorSatisfies(r -> {
ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
assertTrue(e.getServiceMessage().contains("AuthenticationErrorDetail"));
});
}
@ParameterizedTest
@MethodSource("createShareWithArgsSupplier")
public void createShareWithArgs(Map<String, String> metadata, Integer quota) {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(metadata, quota))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
private static Stream<Arguments> createShareWithArgsSupplier() {
return Stream.of(Arguments.of(null, null),
Arguments.of(null, 1),
Arguments.of(testMetadata, null),
Arguments.of(testMetadata, 1));
}
@ParameterizedTest
@MethodSource("createShareWithInvalidArgsSupplier")
public void createShareWithInvalidArgs(Map<String, String> metadata, Integer quota, int statusCode,
ShareErrorCode errMessage) {
StepVerifier.create(primaryShareAsyncClient.createWithResponse(metadata, quota)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode, errMessage));
}
private static Stream<Arguments> createShareWithInvalidArgsSupplier() {
return Stream.of(
Arguments.of(Collections.singletonMap("", "value"), 1, 400, ShareErrorCode.EMPTY_METADATA_KEY),
Arguments.of(Collections.singletonMap("metadata!", "value"), 1, 400, ShareErrorCode.INVALID_METADATA),
Arguments.of(testMetadata, 6000, 400, ShareErrorCode.INVALID_HEADER_VALUE));
}
@Test
public void createSnapshot() {
primaryShareAsyncClient.create().block();
String shareSnapshotName = generatePathName();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(null)).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
ShareClient shareSnapshotClient = new ShareClientBuilder()
.shareName(shareSnapshotName)
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.snapshot(it.getValue().getSnapshot())
.buildClient();
assertEquals(it.getValue().getSnapshot(), shareSnapshotClient.getSnapshotId());
}).verifyComplete();
}
@Test
public void createSnapshotError() {
StepVerifier.create(primaryShareAsyncClient.createSnapshot()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void createSnapshotMetadata() {
primaryShareAsyncClient.create().block();
String shareSnapshotName = generatePathName();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(testMetadata))
.assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 201);
ShareClient shareSnapshotClient = new ShareClientBuilder().shareName(shareSnapshotName)
.connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
.snapshot(it.getValue().getSnapshot()).buildClient();
assertEquals(it.getValue().getSnapshot(), shareSnapshotClient.getSnapshotId());
}).verifyComplete();
}
@Test
public void createSnapshotMetadataError() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createSnapshotWithResponse(Collections.singletonMap("", "value")))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createIfNotExistsShare() {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(null, null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsShareThatAlreadyExists() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
Response<ShareInfo> initialResponse = client.createIfNotExistsWithResponse(new ShareCreateOptions()).block();
Response<ShareInfo> secondResponse = client.createIfNotExistsWithResponse(new ShareCreateOptions()).block();
assertNotNull(initialResponse);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@ParameterizedTest
@MethodSource("createIfNotExistsShareWithArgsSupplier")
public void createIfNotExistsShareWithArgs(Map<String, String> metadata, Integer quota) {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(
new ShareCreateOptions().setMetadata(metadata).setQuotaInGb(quota)))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
private static Stream<Arguments> createIfNotExistsShareWithArgsSupplier() {
return Stream.of(
Arguments.of(null, null),
Arguments.of(null, 1),
Arguments.of(testMetadata, null),
Arguments.of(testMetadata, 1));
}
@ParameterizedTest
@MethodSource("createShareWithInvalidArgsSupplier")
public void createIfNotExistsShareWithInvalidArgs(Map<String, String> metadata, Integer quota, int statusCode,
ShareErrorCode errMessage) {
StepVerifier.create(primaryShareAsyncClient.createIfNotExistsWithResponse(
new ShareCreateOptions().setMetadata(metadata).setQuotaInGb(quota)))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode,
errMessage));
}
@Test
public void deleteShare() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.deleteWithResponse())
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201));
}
@Test
public void deleteShareError() {
StepVerifier.create(primaryShareAsyncClient.delete())
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void deleteIfExistsShare() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.deleteIfExistsWithResponse(null))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201));
}
@Test
public void deleteIfExistsShareThatDoesNotExist() {
ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
Response<Boolean> response = client.deleteIfExistsWithResponse(null, null).block();
assertNotNull(response);
assertFalse(response.getValue());
FileShareTestHelper.assertResponseStatusCode(response, 404);
assertNotEquals(Boolean.TRUE, client.exists().block());
}
@Test
public void deleteIfExistsDirectoryThatWasAlreadyDeleted() {
primaryShareAsyncClient.create().block();
Response<Boolean> initialResponse = primaryShareAsyncClient.deleteIfExistsWithResponse(null, null).block();
sleepIfRunningAgainstService(45000);
Response<Boolean> secondResponse = primaryShareAsyncClient.deleteIfExistsWithResponse(null, null).block();
assertNotNull(initialResponse);
assertNotNull(secondResponse);
FileShareTestHelper.assertResponseStatusCode(initialResponse, 202);
FileShareTestHelper.assertResponseStatusCode(secondResponse, 404);
assertTrue(initialResponse.getValue());
assertFalse(secondResponse.getValue());
}
@Test
public void getProperties() {
primaryShareAsyncClient.createWithResponse(testMetadata, 1).block();
StepVerifier.create(primaryShareAsyncClient.getPropertiesWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertEquals(testMetadata, it.getValue().getMetadata());
assertEquals(it.getValue().getQuota(), 1);
}).verifyComplete();
}
@Test
public void getPropertiesError() {
StepVerifier.create(primaryShareAsyncClient.getProperties()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@PlaybackOnly
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void getPropertiesPremium(String protocol, ShareRootSquash rootSquash) {
ShareProtocols enabledProtocol = ModelHelper.parseShareProtocols(protocol);
ShareAsyncClient premiumShare = Objects.requireNonNull(
premiumFileServiceAsyncClient.createShareWithResponse(generateShareName(), new ShareCreateOptions()
.setMetadata(testMetadata).setProtocols(enabledProtocol).setRootSquash(rootSquash), null)
.block()).getValue();
StepVerifier.create(premiumShare.getPropertiesWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertNotNull(it.getValue());
assertEquals(testMetadata, it.getValue().getMetadata());
assertNotNull(it.getValue().getProvisionedIops());
assertNotNull(it.getValue().getProvisionedBandwidthMiBps());
assertNotNull(it.getValue().getNextAllowedQuotaDowngradeTime());
assertEquals(enabledProtocol.toString(), it.getValue().getProtocols().toString());
assertEquals(rootSquash, it.getValue().getRootSquash());
}).verifyComplete();
}
@PlaybackOnly
@Test
public void setPremiumProperties() {
List<ShareRootSquash> rootSquashes = Arrays.asList(
ShareRootSquash.ALL_SQUASH,
ShareRootSquash.NO_ROOT_SQUASH,
ShareRootSquash.ROOT_SQUASH);
for (ShareRootSquash rootSquash : rootSquashes) {
ShareAsyncClient premiumShareClient = Objects.requireNonNull(
premiumFileServiceAsyncClient.createShareWithResponse(generateShareName(),
new ShareCreateOptions().setProtocols(new ShareProtocols().setNfsEnabled(true)), null)
.block()).getValue();
premiumShareClient.setProperties(new ShareSetPropertiesOptions().setRootSquash(rootSquash)).block();
StepVerifier.create(premiumShareClient.getProperties()).assertNext(it ->
assertEquals(rootSquash, it.getRootSquash())).verifyComplete();
}
}
@Test
public void setQuota() {
primaryShareAsyncClient.createWithResponse(null, 1).block();
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(it.getQuota(), 1)).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.setQuotaWithResponse(2)).assertNext(it ->
FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(it.getQuota(), 2)).verifyComplete();
}
@Test
public void setQuotaError() {
StepVerifier.create(primaryShareAsyncClient.setQuota(2)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void setMetadata() {
primaryShareAsyncClient.createWithResponse(testMetadata, null).block();
Map<String, String> metadataAfterSet = Collections.singletonMap("afterset", "value");
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(testMetadata, it.getMetadata())).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.setMetadataWithResponse(metadataAfterSet))
.assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
StepVerifier.create(primaryShareAsyncClient.getProperties()).assertNext(it ->
assertEquals(metadataAfterSet, it.getMetadata())).verifyComplete();
}
@Test
public void setMetadataError() {
StepVerifier.create(primaryShareAsyncClient.setMetadata(testMetadata)).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void getStatistics(long size, int gigabytes) {
primaryShareAsyncClient.create().block();
primaryShareAsyncClient.createFile("tempFile", size).block();
StepVerifier.create(primaryShareAsyncClient.getStatisticsWithResponse()).assertNext(it -> {
FileShareTestHelper.assertResponseStatusCode(it, 200);
assertEquals(it.getValue().getShareUsageInBytes(), size);
assertEquals(it.getValue().getShareUsageInGB(), gigabytes);
}).verifyComplete();
}
@Test
public void getStatisticsError() {
StepVerifier.create(primaryShareAsyncClient.getStatistics()).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.SHARE_NOT_FOUND));
}
@Test
public void createDirectory() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null, null,
null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createDirectoryInvalidName() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectory("test/directory")).verifyErrorSatisfies(it ->
FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.PARENT_NOT_FOUND));
}
@Test
public void createDirectoryMetadata() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null, null,
testMetadata)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createDirectoryFilePermission() {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", null,
FILE_PERMISSION, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
.verifyComplete();
}
@Test
public void createDirectoryFilePermissionKey() {
primaryShareAsyncClient.create().block();
String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
smbProperties.setFileCreationTime(testResourceNamer.now())
.setFileLastWriteTime(testResourceNamer.now())
.setFilePermissionKey(permissionKey);
StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testCreateDirectory", smbProperties,
null, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
// Directory-creation tests for ShareAsyncClient. Each test provisions the share first via
// create().block(); assertions run through reactor-test's StepVerifier.
@Test
public void createDirectoryMetadataError() {
    // An empty metadata key is rejected by the service with 400/EmptyMetadataKey.
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createDirectoryWithResponse("testdirectory", null, null,
        Collections.singletonMap("", "value"))).verifyErrorSatisfies(it ->
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400, ShareErrorCode.EMPTY_METADATA_KEY));
}
@Test
public void createIfNotExistsDirectory() {
    // On a fresh share, createIfNotExists behaves like a plain create (201).
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
        new ShareDirectoryCreateOptions())).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
        .verifyComplete();
}
@Test
public void createIfNotExistsDirectoryThatAlreadyExists() {
    // A second createIfNotExists for the same name completes with 409 rather than erroring.
    ShareAsyncClient client = premiumFileServiceAsyncClient.getShareAsyncClient(generateShareName());
    client.create().block();
    Response<ShareDirectoryAsyncClient> initialResponse = client.createDirectoryIfNotExistsWithResponse(
        "testCreateDirectory", new ShareDirectoryCreateOptions()).block();
    Response<ShareDirectoryAsyncClient> secondResponse = client.createDirectoryIfNotExistsWithResponse(
        "testCreateDirectory", new ShareDirectoryCreateOptions()).block();
    assertNotNull(initialResponse);
    FileShareTestHelper.assertResponseStatusCode(initialResponse, 201);
    assertNotNull(secondResponse);
    FileShareTestHelper.assertResponseStatusCode(secondResponse, 409);
}
@Test
public void createIfNotExistsDirectoryInvalidName() {
    // "test/directory" names a nested path whose parent does not exist -> 404/ParentNotFound.
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExists("test/directory"))
        .verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
            ShareErrorCode.PARENT_NOT_FOUND));
}
@Test
public void createIfNotExistsDirectoryMetadata() {
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
        new ShareDirectoryCreateOptions().setMetadata(testMetadata)))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryFilePermission() {
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
        new ShareDirectoryCreateOptions().setFilePermission(FILE_PERMISSION)))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryFilePermissionKey() {
    // Registers the permission first, then references it by key through SMB properties.
    primaryShareAsyncClient.create().block();
    String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(permissionKey);
    StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testCreateDirectory",
        new ShareDirectoryCreateOptions().setSmbProperties(smbProperties)))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createIfNotExistsDirectoryMetadataError() {
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createDirectoryIfNotExistsWithResponse("testdirectory",
        new ShareDirectoryCreateOptions().setMetadata(Collections.singletonMap("", "value"))))
        .verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
            ShareErrorCode.EMPTY_METADATA_KEY));
}
// File-creation happy-path tests: bare create, inline permission, and permission-by-key.
@Test
public void createFile() {
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
        null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createFileFilePermission() {
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null,
        FILE_PERMISSION, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201))
        .verifyComplete();
}
@Test
public void createFileFilePermissionKey() {
    // Registers the permission, then creates the file referencing it via SMB properties.
    primaryShareAsyncClient.create().block();
    String permissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(permissionKey);
    StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, smbProperties,
        null, null)).assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileInvalidArgs(String fileName, long maxSize, int statusCode, ShareErrorCode errMsg) {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse(fileName, maxSize, null, null, null, null))
.verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, statusCode,
errMsg));
}
@Test
public void createFileLease() {
    // Overwriting a leased file succeeds when the matching lease id is supplied.
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.getFileClient("testCreateFile").create(512).block();
    String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient("testCreateFile")).acquireLease()
        .block();
    StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
        null, new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void createFileLeaseFail() {
    // A random (non-matching) lease id must be rejected.
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.getFileClient("testCreateFile").create(512).block();
    createLeaseClient(primaryShareAsyncClient.getFileClient("testCreateFile")).acquireLease().block();
    StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, null, null, null,
        null, new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
        .verifyError(ShareStorageException.class);
}
@Test
public void createFileMaxOverload() {
    // Exercises the overload taking headers, SMB properties, permission, and metadata together.
    primaryShareAsyncClient.create().block();
    ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("txt");
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    StepVerifier.create(primaryShareAsyncClient.createFileWithResponse("testCreateFile", 1024, httpHeaders,
        smbProperties, FILE_PERMISSION, testMetadata)).assertNext(it ->
        FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void createFileMaxOverloadInvalidArgs(String fileName, long maxSize, ShareFileHttpHeaders httpHeaders,
Map<String, String> metadata, ShareErrorCode errMsg) {
primaryShareAsyncClient.create().block();
StepVerifier.create(primaryShareAsyncClient.createFileWithResponse(fileName, maxSize, httpHeaders, null, null,
metadata)).verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
errMsg));
}
// Deletion tests: plain delete, lease-gated delete, and the *IfExists variants that surface a
// Boolean-valued Response (false + 404) instead of an error when the target is missing.
@Test
public void deleteDirectory() {
    String directoryName = "testCreateDirectory";
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.createDirectory(directoryName).block();
    StepVerifier.create(primaryShareAsyncClient.deleteDirectoryWithResponse(directoryName))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteDirectoryError() {
    // Deleting a directory that was never created surfaces 404/ResourceNotFound.
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.deleteDirectory("testdirectory"))
        .verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404,
            ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void deleteIfExistsDirectory() {
    String directoryName = "testCreateDirectory";
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.createDirectory(directoryName).block();
    StepVerifier.create(primaryShareAsyncClient.deleteDirectoryIfExistsWithResponse(directoryName))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteIfExistsDirectoryThatDoesNotExist() {
    // deleteIfExists on a missing directory completes with value=false and a 404 status.
    String directoryName = "testCreateDirectory";
    primaryShareAsyncClient.create().block();
    Response<Boolean> response = primaryShareAsyncClient.deleteDirectoryIfExistsWithResponse(directoryName).block();
    assertNotNull(response);
    assertFalse(response.getValue());
    FileShareTestHelper.assertResponseStatusCode(response, 404);
}
@Test
public void deleteFile() {
    String fileName = "testCreateFile";
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.createFile(fileName, 1024).block();
    StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteFileLease() {
    String fileName = "testCreateFile";
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.createFile(fileName, 1024).block();
    String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
    StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName,
        new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void deleteFileLeaseFail() {
    // Deleting a leased file with a non-matching lease id must fail.
    String fileName = "testCreateFile";
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.createFile(fileName, 1024).block();
    createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
    StepVerifier.create(primaryShareAsyncClient.deleteFileWithResponse(fileName,
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
        .verifyError(ShareStorageException.class);
}
@Test
public void deleteFileError() {
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.deleteFile("testdirectory")).verifyErrorSatisfies(it ->
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 404, ShareErrorCode.RESOURCE_NOT_FOUND));
}
@Test
public void deleteIfExistsFile() {
    String fileName = "testCreateFile";
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.createFile(fileName, 1024).block();
    StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName, null))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 202)).verifyComplete();
}
@Test
public void deleteIfExistsFileLease() {
    String fileName = "testCreateFile";
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.createFile(fileName, 1024).block();
    String leaseId = createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
    StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName,
        new ShareRequestConditions().setLeaseId(leaseId))).expectNextCount(1).verifyComplete();
}
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    // deleteIfExists on a missing file completes with value=false and a 404 status.
    String fileName = "testCreateFile";
    primaryShareAsyncClient.create().block();
    Response<Boolean> response = primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName, null).block();
    assertNotNull(response);
    assertFalse(response.getValue());
    FileShareTestHelper.assertResponseStatusCode(response, 404);
}
@Test
public void deleteIfExistsFileLeaseFail() {
    String fileName = "testCreateFile";
    primaryShareAsyncClient.create().block();
    primaryShareAsyncClient.createFile(fileName, 1024).block();
    createLeaseClient(primaryShareAsyncClient.getFileClient(fileName)).acquireLease().block();
    StepVerifier.create(primaryShareAsyncClient.deleteFileIfExistsWithResponse(fileName,
        new ShareRequestConditions().setLeaseId(testResourceNamer.randomUuid())))
        .verifyError(ShareStorageException.class);
}
// Permission round-trip tests plus simple client-property getters.
@Test
public void createPermission() {
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createPermissionWithResponse(FILE_PERMISSION))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 201)).verifyComplete();
}
@Test
public void createAndGetPermission() {
    // Round-trips a permission: create returns a key that getPermission can resolve.
    primaryShareAsyncClient.create().block();
    String filePermissionKey = primaryShareAsyncClient.createPermission(FILE_PERMISSION).block();
    StepVerifier.create(primaryShareAsyncClient.getPermissionWithResponse(filePermissionKey))
        .assertNext(it -> FileShareTestHelper.assertResponseStatusCode(it, 200)).verifyComplete();
}
@Test
public void createPermissionError() {
    // "abcde" is not a valid permission string -> 400/FileInvalidPermission.
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.createPermissionWithResponse("abcde")).verifyErrorSatisfies(it ->
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
            ShareErrorCode.fromString("FileInvalidPermission")));
}
@Test
public void getPermissionError() {
    // "abcde" is not a valid permission key -> 400/InvalidHeaderValue.
    primaryShareAsyncClient.create().block();
    StepVerifier.create(primaryShareAsyncClient.getPermissionWithResponse("abcde"))
        .verifyErrorSatisfies(it -> FileShareTestHelper.assertExceptionStatusCodeAndMessage(it, 400,
            ShareErrorCode.INVALID_HEADER_VALUE));
}
@Test
public void getSnapshotId() {
    // The snapshot id handed to the builder must round-trip unchanged.
    String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString();
    ShareAsyncClient shareSnapshotClient = shareBuilderHelper(shareName).snapshot(snapshot).buildAsyncClient();
    assertEquals(snapshot, shareSnapshotClient.getSnapshotId());
}
@Test
public void getShareName() {
    assertEquals(shareName, primaryShareAsyncClient.getShareName());
}
// OAuth audience tests: null audience (default), explicit account audience, and a bad audience
// which must fail authentication. The SDDL literal is the fixed permission used by all three.
@Test
public void defaultAudience() {
    primaryShareAsyncClient.create().block();
    ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder().shareName(shareName)
        .shareTokenIntent(ShareTokenIntent.BACKUP))
        .audience(null)
        .buildAsyncClient();
    String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
        + "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
        + "188441444-3053964)S:NO_ACCESS_CONTROL";
    StepVerifier.create(aadShareClient.createPermission(permission))
        .assertNext(r -> assertNotNull(r))
        .verifyComplete();
}
@Test
public void storageAccountAudience() {
    primaryShareAsyncClient.create().block();
    ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
        .shareName(shareName)
        .shareTokenIntent(ShareTokenIntent.BACKUP)
        .audience(ShareAudience.createShareServiceAccountAudience(primaryShareAsyncClient.getAccountName()))
        .buildAsyncClient();
    String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
        + "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
        + "188441444-3053964)S:NO_ACCESS_CONTROL";
    StepVerifier.create(aadShareClient.createPermission(permission))
        .assertNext(r -> assertNotNull(r))
        .verifyComplete();
}
@Test
public void audienceError() {
    // A token scoped to the wrong audience must fail with AuthenticationFailed.
    primaryShareAsyncClient.create().block();
    ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
        .shareName(shareName)
        .shareTokenIntent(ShareTokenIntent.BACKUP)
        .audience(ShareAudience.createShareServiceAccountAudience("badaudience"))
        .buildAsyncClient();
    String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
        + "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
        + "188441444-3053964)S:NO_ACCESS_CONTROL";
    StepVerifier.create(aadShareClient.createPermission(permission))
        .verifyErrorSatisfies(r -> {
            ShareStorageException e = assertInstanceOf(ShareStorageException.class, r);
            assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode());
        });
}
@Test
public void audienceFromString() {
String url = String.format("https:
ShareAudience audience = ShareAudience.fromString(url);
primaryShareAsyncClient.create().block();
ShareAsyncClient aadShareClient = getOAuthShareClientBuilder(new ShareClientBuilder())
.shareName(shareName)
.shareTokenIntent(ShareTokenIntent.BACKUP)
.audience(audience)
.buildAsyncClient();
String permission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-"
+ "1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-"
+ "188441444-3053964)S:NO_ACCESS_CONTROL";
StepVerifier.create(aadShareClient.createPermission(permission))
.assertNext(r -> assertNotNull(r))
.verifyComplete();
}
// Requires service version 2024-05-04+ for the snapshot-virtual-directory-access flag on NFS shares.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-05-04")
@ParameterizedTest
@MethodSource("createEnableSnapshotVirtualDirectoryAccessSupplier")
public void createEnableSnapshotVirtualDirectoryAccess(Boolean enableSnapshotVirtualDirectoryAccess) {
    ShareCreateOptions options = new ShareCreateOptions();
    ShareProtocols protocols = ModelHelper.parseShareProtocols(Constants.HeaderConstants.NFS_PROTOCOL);
    options.setProtocols(protocols);
    options.setSnapshotVirtualDirectoryAccessEnabled(enableSnapshotVirtualDirectoryAccess);
    primaryShareAsyncClient.createWithResponse(options).block();
    StepVerifier.create(primaryShareAsyncClient.getProperties())
        .assertNext(r -> {
            assertEquals(protocols.toString(), r.getProtocols().toString());
            // null is asserted to behave like "enabled" -- presumably the service default; confirm.
            if (enableSnapshotVirtualDirectoryAccess == null || enableSnapshotVirtualDirectoryAccess) {
                assertTrue(r.isEnableSnapshotVirtualDirectoryAccess());
            } else {
                assertFalse(r.isEnableSnapshotVirtualDirectoryAccess());
            }
        })
        .verifyComplete();
}
// Supplies true, false, and null (unset) for the parameterized test above.
private static Stream<Arguments> createEnableSnapshotVirtualDirectoryAccessSupplier() {
    return Stream.of(
        Arguments.of(true),
        Arguments.of(false),
        Arguments.of((Boolean) null));
}
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-08-04")
@ParameterizedTest
@MethodSource("createEnableSnapshotVirtualDirectoryAccessSupplier")
} |
don't we need this for the json representation though? | static String writeJsonFragment(Map<String, Object> context) {
    // Nothing to serialize -- return an empty fragment rather than a stripped "{}".
    if (CoreUtils.isNullOrEmpty(context)) {
        return "";
    }
    // Rough pre-size (~20 bytes per entry) to avoid most buffer growth for typical contexts.
    int speculatedSize = context.size() * 20;
    try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(speculatedSize);
        JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
        jsonWriter.writeMap(context, JsonWriter::writeUntyped).flush();
        // writeMap emitted a complete JSON object; strip the surrounding '{' and '}' so the result
        // is a "k":"v",... fragment that can be spliced into another object.
        // NOTE(review): size() is a BYTE count while substring() indexes CHARS; if the writer emits
        // unescaped non-ASCII, these diverge and the end index is wrong -- confirm escaping behavior.
        return outputStream.toString(StandardCharsets.UTF_8).substring(1, outputStream.size() - 1);
    } catch (IOException ex) {
        throw new UncheckedIOException(ex);
    }
} | static String writeJsonFragment(Map<String, Object> context) {
    // Duplicate copy of writeJsonFragment: serializes the map to a brace-stripped JSON fragment.
    if (CoreUtils.isNullOrEmpty(context)) {
        return "";
    }
    int speculatedSize = context.size() * 20;
    try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(speculatedSize);
        JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
        jsonWriter.writeMap(context, JsonWriter::writeUntyped).flush();
        // NOTE(review): byte count (size()) used as a char index (substring) -- see escaping caveat.
        return outputStream.toString(StandardCharsets.UTF_8).substring(1, outputStream.size() - 1);
    } catch (IOException ex) {
        throw new UncheckedIOException(ex);
    }
} | class LoggingEventBuilder {
// Shared disabled instance returned for levels that are not logged; all operations no-op on it.
private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false);
private final Logger logger;
private final LogLevel level;
// Per-event key/value pairs; lazily allocated on first addKeyValue.
private List<ContextKeyValuePair> context;
// Pre-serialized JSON fragment of global context ("k":"v",...); empty string when absent.
private final String globalContextCached;
private final boolean hasGlobalContext;
private final boolean isEnabled;
/**
 * Creates a {@code LoggingEventBuilder} for the provided level and {@link ClientLogger}.
 * When logging is disabled at the requested level, the shared no-op instance is returned so
 * callers pay nothing for disabled levels.
 *
 * @param logger underlying logger the event will be written to.
 * @param level severity the resulting event is logged at.
 * @param globalContextSerialized pre-serialized JSON fragment of global context, may be null.
 * @param canLogAtLevel whether the logger is enabled at {@code level}.
 * @return an enabled builder, or the shared no-op instance.
 */
static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized,
    boolean canLogAtLevel) {
    if (!canLogAtLevel) {
        return NOOP;
    }
    return new LoggingEventBuilder(logger, level, globalContextSerialized, true);
}
private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) {
this.logger = logger;
this.level = level;
this.isEnabled = isEnabled;
this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized;
this.hasGlobalContext = !this.globalContextCached.isEmpty();
}
/**
 * Adds key with String value pair to the context of current log being created.
 *
 * <p><strong>Code samples</strong></p>
 *
 * <p>Adding string value to logging event context.</p>
 *
 * <!-- src_embed io.clientcore.core.util.logging.clientlogger.atInfo -->
 * <pre>
 * logger.atInfo&#40;&#41;
 *     .addKeyValue&#40;&quot;key1&quot;, &quot;value1&quot;&#41;
 *     .addKeyValue&#40;&quot;key2&quot;, true&#41;
 *     .log&#40;&quot;A structured log message.&quot;&#41;;
 * </pre>
 * <!-- end io.clientcore.core.util.logging.clientlogger.atInfo -->
 *
 * NOTE(review): the embedded sample above was truncated in this copy and has been reconstructed;
 * confirm it against the snippet source referenced by the src_embed id.
 *
 * @param key String key.
 * @param value String value.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, String value) {
    if (this.isEnabled) {
        addKeyValueInternal(key, value);
    }
    return this;
}
/**
 * Adds key with Object value to the context of current log being created.
 * If logging is enabled at given level, and object is not null, uses {@code value.toString()} to
 * serialize object.
 *
 * <p><strong>Code samples</strong></p>
 *
 * <p>Adding string value to logging event context.</p>
 *
 * <!-- src_embed io.clientcore.core.util.logging.clientlogger.atverbose.addKeyValue#object -->
 * <pre>
 * logger.atVerbose&#40;&#41;
 *     .addKeyValue&#40;&quot;key&quot;, new LoggableObject&#40;&quot;string representation&quot;&#41;&#41;
 *     .log&#40;&quot;A structured log message.&quot;&#41;;
 * </pre>
 * <!-- end io.clientcore.core.util.logging.clientlogger.atverbose.addKeyValue#object -->
 *
 * NOTE(review): the embedded sample above was truncated in this copy and has been reconstructed;
 * confirm it against the snippet source referenced by the src_embed id.
 *
 * @param key String key.
 * @param value Object value.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, Object value) {
    if (this.isEnabled) {
        addKeyValueInternal(key, value);
    }
    return this;
}
/**
 * Adds a key with a boolean value to the context of the current log being created.
 *
 * @param key Key to associate the provided {@code value} with.
 * @param value The boolean value.
 * @return The updated {@link LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, boolean value) {
    // Boxing of the primitive only happens when the level is enabled.
    if (this.isEnabled) {
        addKeyValueInternal(key, value);
    }
    return this;
}
/**
 * Adds key with long value to the context of current log event being created.
 *
 * <p><strong>Code samples</strong></p>
 *
 * <p>Adding a long value to the logging event context.</p>
 *
 * <!-- src_embed io.clientcore.core.util.logging.clientlogger.atverbose.addKeyValue#long -->
 * <pre>
 * logger.atVerbose&#40;&#41;
 *     .addKeyValue&#40;&quot;key&quot;, 1L&#41;
 *     .log&#40;&quot;A structured log message.&quot;&#41;;
 * </pre>
 * <!-- end io.clientcore.core.util.logging.clientlogger.atverbose.addKeyValue#long -->
 *
 * NOTE(review): the embedded sample above was truncated in this copy and has been reconstructed;
 * confirm it against the snippet source referenced by the src_embed id.
 *
 * @param key Key to associate the provided {@code value} with.
 * @param value The long value.
 * @return The updated {@link LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, long value) {
    if (this.isEnabled) {
        addKeyValueInternal(key, value);
    }
    return this;
}
/**
 * Adds key with String value supplier to the context of current log event being created.
 * The supplier is stored as-is and only invoked when the pair is serialized, so expensive
 * values are never computed for events that are dropped.
 *
 * @param key String key.
 * @param valueSupplier String value supplier function.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) {
    if (!isEnabled) {
        return this;
    }
    if (context == null) {
        context = new ArrayList<>();
    }
    context.add(new ContextKeyValuePair(key, valueSupplier));
    return this;
}
/**
 * Logs the message annotated with the context accumulated on this builder. Events that are
 * entirely empty (no message, no per-event context, no global context) are dropped.
 *
 * @param message log message.
 */
public void log(String message) {
    if (!isEnabled) {
        return;
    }
    String sanitized = removeNewLinesFromLogMessage(message);
    if (isEmptyMessage(sanitized)) {
        return;
    }
    performLogging(level, getMessageWithContext(sanitized), (Throwable) null);
}
/**
 * Logs message annotated with context.
 *
 * @param message log message.
 * @param throwable {@link Throwable} for the message.
 * @param <T> Type of the Throwable being logged.
 *
 * @return The passed {@link Throwable}.
 */
public <T extends Throwable> T log(String message, T throwable) {
    if (this.isEnabled) {
        message = removeNewLinesFromLogMessage(message);
        if (throwable != null) {
            // The exception message always goes into the structured context; the full stack trace
            // is only captured for the DefaultLogger at debug level (other loggers receive the
            // Throwable object itself via performLogging below).
            addKeyValueInternal("exception.message", throwable.getMessage());
            if (logger instanceof DefaultLogger && logger.isDebugEnabled()) {
                addKeyValue("exception.stacktrace", getStackTrace(throwable));
            }
        }
        // Context must be fully populated (including the exception keys above) before the message
        // is serialized -- keep this ordering.
        String messageWithContext = getMessageWithContext(message);
        performLogging(level, messageWithContext, logger.isDebugEnabled() ? throwable : null);
    }
    return throwable;
}
// Renders the full stack trace of t as trimmed text, used for the exception.stacktrace key.
private String getStackTrace(Throwable t) {
    StringWriter buffer = new StringWriter();
    t.printStackTrace(new PrintWriter(buffer));
    return buffer.toString().trim();
}
// An event is "empty" when it has no message and no per-event or global context to emit.
private boolean isEmptyMessage(String message) {
    boolean hasAnyContext = hasGlobalContext || !CoreUtils.isNullOrEmpty(context);
    return !hasAnyContext && CoreUtils.isNullOrEmpty(message);
}
/**
 * Serializes the log message plus global and per-event context into one JSON object, e.g.
 * {@code {"message":"...","globalKey":"v","eventKey":"v"}}.
 */
private String getMessageWithContext(String message) {
    if (message == null) {
        message = "";
    }
    // Rough pre-size (~20 bytes per pair) to avoid most buffer growth.
    int speculatedSize = 20 + (context == null ? 0 : context.size()) * 20 + message.length()
        + globalContextCached.length();
    try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(speculatedSize);
        JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
        jsonWriter.writeStartObject()
            .writeStringField("message", message)
            .flush();
        // The global context is a pre-serialized, pre-escaped fragment ("k":"v",...), spliced into
        // the stream as raw bytes behind the writer's back. The flush above makes the writer's
        // output land before the splice; NOTE(review): this relies on the writer still emitting
        // its own separator before the next field even though it is unaware of the injected bytes.
        if (hasGlobalContext) {
            outputStream.write(',');
            outputStream.write(globalContextCached.getBytes(StandardCharsets.UTF_8));
        }
        if (context != null) {
            for (ContextKeyValuePair contextKeyValuePair : context) {
                contextKeyValuePair.write(jsonWriter);
            }
        }
        jsonWriter.writeEndObject().flush();
        return outputStream.toString(StandardCharsets.UTF_8);
    } catch (IOException ex) {
        throw new UncheckedIOException(ex);
    }
}
// Lazily allocates the context list and records the pair; all callers in this class have
// already checked isEnabled before reaching here.
private void addKeyValueInternal(String key, Object value) {
    if (context == null) {
        context = new ArrayList<>();
    }
    context.add(new ContextKeyValuePair(key, value));
}
// Dispatches to the logger method matching the event's severity; any other level is dropped,
// mirroring the original switch's empty default.
private void performLogging(LogLevel logLevel, String message, Throwable throwable) {
    if (logLevel == LogLevel.VERBOSE) {
        logger.debug(message, throwable);
    } else if (logLevel == LogLevel.INFORMATIONAL) {
        logger.info(message, throwable);
    } else if (logLevel == LogLevel.WARNING) {
        logger.warn(message, throwable);
    } else if (logLevel == LogLevel.ERROR) {
        logger.error(message, throwable);
    }
}
/**
 * Serializes passed map to string containing valid JSON fragment:
 * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma.
 * <p>
 * For complex object serialization, it calls {@code toString()} guarded with null check.
 *
 * @param context to serialize.
 *
 * @return Serialized JSON fragment or an empty string.
 */
// NOTE(review): the Javadoc above describes writeJsonFragment(Map), not the class below --
// it appears detached from that method; verify placement against the original file.
private static final class ContextKeyValuePair {
    // Context key for this pair.
    private final String key;
    // Eagerly-captured value; null when a supplier is used instead.
    private final Object value;
    // Lazily-evaluated value; invoked only when the pair is written.
    private final Supplier<String> valueSupplier;
    ContextKeyValuePair(String key, Object value) {
        this.key = key;
        this.value = value;
        this.valueSupplier = null;
    }
    ContextKeyValuePair(String key, Supplier<String> valueSupplier) {
        this.key = key;
        this.value = null;
        this.valueSupplier = valueSupplier;
    }
    /**
     * Writes "key":"value" JSON to the provided {@link JsonWriter}.
     */
    void write(JsonWriter jsonWriter) throws IOException {
        if (valueSupplier == null) {
            jsonWriter.writeUntypedField(key, value);
        } else {
            jsonWriter.writeUntypedField(key, valueSupplier.get());
        }
    }
}
} | class LoggingEventBuilder {
private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false);
private final Logger logger;
private final LogLevel level;
private List<ContextKeyValuePair> context;
private final String globalContextCached;
private final boolean hasGlobalContext;
private final boolean isEnabled;
/**
* Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}.
* If level is disabled, returns no-op instance.
*/
static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized,
boolean canLogAtLevel) {
if (canLogAtLevel) {
return new LoggingEventBuilder(logger, level, globalContextSerialized, true);
}
return NOOP;
}
private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) {
this.logger = logger;
this.level = level;
this.isEnabled = isEnabled;
this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized;
this.hasGlobalContext = !this.globalContextCached.isEmpty();
}
/**
 * Adds key with String value pair to the context of current log being created.
 *
 * <p><strong>Code samples</strong></p>
 *
 * <p>Adding string value to logging event context.</p>
 *
 * <!-- src_embed io.clientcore.core.util.logging.clientlogger.atInfo -->
 * <pre>
 * logger.atInfo&#40;&#41;
 *     .addKeyValue&#40;&quot;key1&quot;, &quot;value1&quot;&#41;
 *     .addKeyValue&#40;&quot;key2&quot;, true&#41;
 *     .log&#40;&quot;A structured log message.&quot;&#41;;
 * </pre>
 * <!-- end io.clientcore.core.util.logging.clientlogger.atInfo -->
 *
 * NOTE(review): the embedded sample above was truncated in this copy and has been reconstructed;
 * confirm it against the snippet source referenced by the src_embed id.
 *
 * @param key String key.
 * @param value String value.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, String value) {
    if (this.isEnabled) {
        addKeyValueInternal(key, value);
    }
    return this;
}
/**
* Adds key with Object value to the context of current log being created.
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to
* serialize object.
*
* <p><strong>Code samples</strong></p>
*
* <p>Adding string value to logging event context.</p>
*
* <!-- src_embed io.clientcore.core.util.logging.clientlogger.atverbose.addKeyValue
* <pre>
* logger.atVerbose&
* &
* .addKeyValue&
* .log&
* </pre>
* <!-- end io.clientcore.core.util.logging.clientlogger.atverbose.addKeyValue
*
* @param key String key.
* @param value Object value.
* @return The updated {@code LoggingEventBuilder} object.
*/
public LoggingEventBuilder addKeyValue(String key, Object value) {
if (this.isEnabled) {
addKeyValueInternal(key, value);
}
return this;
}
/**
* Adds a key with a boolean value to the context of the current log being created.
*
* @param key Key to associate the provided {@code value} with.
* @param value The boolean value.
* @return The updated {@link LoggingEventBuilder} object.
*/
public LoggingEventBuilder addKeyValue(String key, boolean value) {
if (this.isEnabled) {
addKeyValueInternal(key, value);
}
return this;
}
/**
* Adds key with long value to the context of current log event being created.
*
* <p><strong>Code samples</strong></p>
*
* <p>Adding a long value to the logging event context.</p>
*
* <!-- src_embed io.clientcore.core.util.logging.clientlogger.atverbose.addKeyValue
* <pre>
* logger.atVerbose&
* .addKeyValue&
* .log&
* </pre>
* <!-- end io.clientcore.core.util.logging.clientlogger.atverbose.addKeyValue
*
* @param key Key to associate the provided {@code value} with.
* @param value The long value.
* @return The updated {@link LoggingEventBuilder} object.
*/
public LoggingEventBuilder addKeyValue(String key, long value) {
if (this.isEnabled) {
addKeyValueInternal(key, value);
}
return this;
}
/**
* Adds key with String value supplier to the context of current log event being created.
*
* @param key String key.
* @param valueSupplier String value supplier function.
* @return The updated {@code LoggingEventBuilder} object.
*/
public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) {
if (this.isEnabled) {
if (this.context == null) {
this.context = new ArrayList<>();
}
this.context.add(new ContextKeyValuePair(key, valueSupplier));
}
return this;
}
/**
* Logs message annotated with context.
*
* @param message log message.
*/
public void log(String message) {
if (this.isEnabled) {
message = removeNewLinesFromLogMessage(message);
if (isEmptyMessage(message)) {
return;
}
performLogging(level, getMessageWithContext(message), (Throwable) null);
}
}
/**
* Logs message annotated with context.
*
* @param message log message.
* @param throwable {@link Throwable} for the message.
* @param <T> Type of the Throwable being logged.
*
* @return The passed {@link Throwable}.
*/
public <T extends Throwable> T log(String message, T throwable) {
if (this.isEnabled) {
message = removeNewLinesFromLogMessage(message);
if (throwable != null) {
addKeyValueInternal("exception.message", throwable.getMessage());
if (logger instanceof DefaultLogger && logger.isDebugEnabled()) {
addKeyValue("exception.stacktrace", getStackTrace(throwable));
}
}
String messageWithContext = getMessageWithContext(message);
performLogging(level, messageWithContext, logger.isDebugEnabled() ? throwable : null);
}
return throwable;
}
private String getStackTrace(Throwable t) {
StringWriter sw = new StringWriter();
PrintWriter pw = new PrintWriter(sw);
t.printStackTrace(pw);
return sw.toString().trim();
}
private boolean isEmptyMessage(String message) {
return CoreUtils.isNullOrEmpty(message)
&& CoreUtils.isNullOrEmpty(context)
&& !hasGlobalContext;
}
private String getMessageWithContext(String message) {
if (message == null) {
message = "";
}
int speculatedSize = 20 + (context == null ? 0 : context.size()) * 20 + message.length()
+ globalContextCached.length();
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(speculatedSize);
JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
jsonWriter.writeStartObject()
.writeStringField("message", message)
.flush();
if (hasGlobalContext) {
outputStream.write(',');
outputStream.write(globalContextCached.getBytes(StandardCharsets.UTF_8));
}
if (context != null) {
for (ContextKeyValuePair contextKeyValuePair : context) {
contextKeyValuePair.write(jsonWriter);
}
}
jsonWriter.writeEndObject().flush();
return outputStream.toString(StandardCharsets.UTF_8);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
private void addKeyValueInternal(String key, Object value) {
if (this.context == null) {
this.context = new ArrayList<>();
}
this.context.add(new ContextKeyValuePair(key, value));
}
private void performLogging(LogLevel logLevel, String message, Throwable throwable) {
switch (logLevel) {
case VERBOSE:
logger.debug(message, throwable);
break;
case INFORMATIONAL:
logger.info(message, throwable);
break;
case WARNING:
logger.warn(message, throwable);
break;
case ERROR:
logger.error(message, throwable);
break;
default:
break;
}
}
/**
* Serializes passed map to string containing valid JSON fragment:
* e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma.
* <p>
* For complex object serialization, it calls {@code toString()} guarded with null check.
*
* @param context to serialize.
*
* @return Serialized JSON fragment or an empty string.
*/
private static final class ContextKeyValuePair {
    private final String key;
    private final Object value;
    private final Supplier<String> valueSupplier;

    ContextKeyValuePair(String key, Object value) {
        this.key = key;
        this.value = value;
        this.valueSupplier = null;
    }

    ContextKeyValuePair(String key, Supplier<String> valueSupplier) {
        this.key = key;
        this.value = null;
        this.valueSupplier = valueSupplier;
    }

    /**
     * Serializes this entry as a "key":value JSON field on the given writer,
     * resolving the lazy supplier now if one was provided.
     */
    void write(JsonWriter jsonWriter) throws IOException {
        Object resolved = (valueSupplier != null) ? valueSupplier.get() : value;
        jsonWriter.writeUntypedField(key, resolved);
    }
}
} | |
Diff isn't great here which makes this confusing. This is the method that created the pre-JSONified global context, which takes the form of `"key1":"value1",...`, as it gets injected as a list of key-value pairs (or a JSON fragment). The log continues to be a fully formed JSON string with leading `{` and trailing `}` | static String writeJsonFragment(Map<String, Object> context) {
if (CoreUtils.isNullOrEmpty(context)) {
return "";
}
int speculatedSize = context.size() * 20;
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(speculatedSize);
JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
jsonWriter.writeMap(context, JsonWriter::writeUntyped).flush();
return outputStream.toString(StandardCharsets.UTF_8).substring(1, outputStream.size() - 1);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
} | static String writeJsonFragment(Map<String, Object> context) {
if (CoreUtils.isNullOrEmpty(context)) {
return "";
}
int speculatedSize = context.size() * 20;
try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(speculatedSize);
JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
jsonWriter.writeMap(context, JsonWriter::writeUntyped).flush();
return outputStream.toString(StandardCharsets.UTF_8).substring(1, outputStream.size() - 1);
} catch (IOException ex) {
throw new UncheckedIOException(ex);
}
} | class LoggingEventBuilder {
private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false);
private final Logger logger;
private final LogLevel level;
private List<ContextKeyValuePair> context;
private final String globalContextCached;
private final boolean hasGlobalContext;
private final boolean isEnabled;
/**
 * Creates a {@code LoggingEventBuilder} for the provided level and logger, or returns
 * the shared no-op instance when logging at that level is disabled.
 */
static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized,
    boolean canLogAtLevel) {
    return canLogAtLevel ? new LoggingEventBuilder(logger, level, globalContextSerialized, true) : NOOP;
}
private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) {
this.logger = logger;
this.level = level;
this.isEnabled = isEnabled;
this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized;
this.hasGlobalContext = !this.globalContextCached.isEmpty();
}
/**
 * Adds a String key/value pair to the context of the log event being built.
 * Does nothing when logging is disabled at this level.
 *
 * @param key String key.
 * @param value String value.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, String value) {
    if (!this.isEnabled) {
        return this;
    }
    addKeyValueInternal(key, value);
    return this;
}
/**
 * Adds a key with an Object value to the context of the log event being built.
 * Does nothing when logging is disabled at this level; the value is serialized
 * via the JSON writer when the event is logged.
 *
 * @param key String key.
 * @param value Object value.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, Object value) {
    if (!this.isEnabled) {
        return this;
    }
    addKeyValueInternal(key, value);
    return this;
}
/**
 * Adds a key with a boolean value to the context of the log event being built.
 * Does nothing when logging is disabled at this level.
 *
 * @param key Key to associate the provided {@code value} with.
 * @param value The boolean value.
 * @return The updated {@link LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, boolean value) {
    if (!this.isEnabled) {
        return this;
    }
    addKeyValueInternal(key, value);
    return this;
}
/**
 * Adds a key with a long value to the context of the log event being built.
 * Does nothing when logging is disabled at this level.
 *
 * @param key Key to associate the provided {@code value} with.
 * @param value The long value.
 * @return The updated {@link LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, long value) {
    if (!this.isEnabled) {
        return this;
    }
    addKeyValueInternal(key, value);
    return this;
}
/**
 * Adds a key with a lazily-supplied String value to the context of the log event
 * being built; the supplier is invoked only when the event is serialized.
 * Does nothing when logging is disabled at this level.
 *
 * @param key String key.
 * @param valueSupplier String value supplier function.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) {
    if (!this.isEnabled) {
        return this;
    }
    if (this.context == null) {
        this.context = new ArrayList<>();
    }
    this.context.add(new ContextKeyValuePair(key, valueSupplier));
    return this;
}
/**
 * Logs the message annotated with the accumulated context. Events with no message,
 * no per-event context, and no global context are dropped entirely.
 *
 * @param message log message.
 */
public void log(String message) {
    if (!this.isEnabled) {
        return;
    }
    String sanitized = removeNewLinesFromLogMessage(message);
    if (!isEmptyMessage(sanitized)) {
        performLogging(level, getMessageWithContext(sanitized), (Throwable) null);
    }
}
/**
 * Logs the message and throwable annotated with the accumulated context, returning the
 * throwable so it can be rethrown by the caller.
 *
 * @param message log message.
 * @param throwable {@link Throwable} for the message.
 * @param <T> Type of the Throwable being logged.
 *
 * @return The passed {@link Throwable}.
 */
public <T extends Throwable> T log(String message, T throwable) {
    if (this.isEnabled) {
        message = removeNewLinesFromLogMessage(message);
        if (throwable != null) {
            // Expose the exception message as a structured context entry.
            addKeyValueInternal("exception.message", throwable.getMessage());
            // Stack traces are captured only for the default logger and only at debug level.
            if (logger instanceof DefaultLogger && logger.isDebugEnabled()) {
                addKeyValue("exception.stacktrace", getStackTrace(throwable));
            }
        }
        String messageWithContext = getMessageWithContext(message);
        // The raw throwable is forwarded to the logger only when debug is enabled.
        performLogging(level, messageWithContext, logger.isDebugEnabled() ? throwable : null);
    }
    return throwable;
}
/**
 * Renders the throwable's stack trace as a trimmed string.
 */
private String getStackTrace(Throwable t) {
    StringWriter stackTraceWriter = new StringWriter();
    try (PrintWriter printWriter = new PrintWriter(stackTraceWriter)) {
        t.printStackTrace(printWriter);
    }
    return stackTraceWriter.toString().trim();
}
/**
 * Determines whether this event carries nothing worth logging: no message text,
 * no per-event context entries, and no cached global context.
 */
private boolean isEmptyMessage(String message) {
    boolean noContextEntries = CoreUtils.isNullOrEmpty(context);
    return !hasGlobalContext && noContextEntries && CoreUtils.isNullOrEmpty(message);
}
/**
 * Builds the final log line: a JSON object containing the message plus the cached
 * global context and any per-event key/value entries, e.g.
 * {"message":"...","globalKey":...,"eventKey":...}.
 */
private String getMessageWithContext(String message) {
    if (message == null) {
        message = "";
    }
    // Rough capacity guess (~20 bytes per entry) to avoid early buffer growth.
    int speculatedSize = 20 + (context == null ? 0 : context.size()) * 20 + message.length()
        + globalContextCached.length();
    try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(speculatedSize);
        JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
        jsonWriter.writeStartObject()
            .writeStringField("message", message)
            // Flush so the writer's output is in the stream before raw bytes are spliced in below.
            .flush();
        if (hasGlobalContext) {
            // The global context is pre-serialized as a JSON fragment; write it directly
            // to the stream, preceded by the comma the writer does not know it needs.
            outputStream.write(',');
            outputStream.write(globalContextCached.getBytes(StandardCharsets.UTF_8));
        }
        if (context != null) {
            for (ContextKeyValuePair contextKeyValuePair : context) {
                contextKeyValuePair.write(jsonWriter);
            }
        }
        jsonWriter.writeEndObject().flush();
        return outputStream.toString(StandardCharsets.UTF_8);
    } catch (IOException ex) {
        // The stream is in-memory, so IOExceptions are unexpected; surface them unchecked.
        throw new UncheckedIOException(ex);
    }
}
// Lazily creates the context list and appends an eagerly-captured key/value entry.
private void addKeyValueInternal(String key, Object value) {
    List<ContextKeyValuePair> entries = this.context;
    if (entries == null) {
        entries = new ArrayList<>();
        this.context = entries;
    }
    entries.add(new ContextKeyValuePair(key, value));
}
/**
 * Dispatches the already-formatted message (and optional throwable) to the underlying
 * logger method matching the requested level. Other levels are intentionally ignored.
 */
private void performLogging(LogLevel logLevel, String message, Throwable throwable) {
    if (logLevel == LogLevel.VERBOSE) {
        logger.debug(message, throwable);
    } else if (logLevel == LogLevel.INFORMATIONAL) {
        logger.info(message, throwable);
    } else if (logLevel == LogLevel.WARNING) {
        logger.warn(message, throwable);
    } else if (logLevel == LogLevel.ERROR) {
        logger.error(message, throwable);
    }
}
/**
 * A single key/value context entry for a log event. The value is either captured
 * eagerly at call time or produced lazily by a {@link Supplier} that is invoked only
 * when the entry is serialized.
 */
private static final class ContextKeyValuePair {
    private final String key;
    private final Object value;
    private final Supplier<String> valueSupplier;

    ContextKeyValuePair(String key, Object value) {
        this.key = key;
        this.value = value;
        this.valueSupplier = null;
    }

    ContextKeyValuePair(String key, Supplier<String> valueSupplier) {
        this.key = key;
        this.value = null;
        this.valueSupplier = valueSupplier;
    }

    /**
     * Serializes this entry as a "key":value JSON field on the given writer,
     * resolving the lazy supplier now if one was provided.
     */
    void write(JsonWriter jsonWriter) throws IOException {
        Object resolved = (valueSupplier != null) ? valueSupplier.get() : value;
        jsonWriter.writeUntypedField(key, resolved);
    }
}
} | class LoggingEventBuilder {
private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false);
private final Logger logger;
private final LogLevel level;
private List<ContextKeyValuePair> context;
private final String globalContextCached;
private final boolean hasGlobalContext;
private final boolean isEnabled;
/**
 * Creates a {@code LoggingEventBuilder} for the provided level and logger, or returns
 * the shared no-op instance when logging at that level is disabled.
 */
static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized,
    boolean canLogAtLevel) {
    return canLogAtLevel ? new LoggingEventBuilder(logger, level, globalContextSerialized, true) : NOOP;
}
private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) {
this.logger = logger;
this.level = level;
this.isEnabled = isEnabled;
this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized;
this.hasGlobalContext = !this.globalContextCached.isEmpty();
}
/**
 * Adds a String key/value pair to the context of the log event being built.
 * Does nothing when logging is disabled at this level.
 *
 * @param key String key.
 * @param value String value.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, String value) {
    if (!this.isEnabled) {
        return this;
    }
    addKeyValueInternal(key, value);
    return this;
}
/**
 * Adds a key with an Object value to the context of the log event being built.
 * Does nothing when logging is disabled at this level; the value is serialized
 * via the JSON writer when the event is logged.
 *
 * @param key String key.
 * @param value Object value.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, Object value) {
    if (!this.isEnabled) {
        return this;
    }
    addKeyValueInternal(key, value);
    return this;
}
/**
 * Adds a key with a boolean value to the context of the log event being built.
 * Does nothing when logging is disabled at this level.
 *
 * @param key Key to associate the provided {@code value} with.
 * @param value The boolean value.
 * @return The updated {@link LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, boolean value) {
    if (!this.isEnabled) {
        return this;
    }
    addKeyValueInternal(key, value);
    return this;
}
/**
 * Adds a key with a long value to the context of the log event being built.
 * Does nothing when logging is disabled at this level.
 *
 * @param key Key to associate the provided {@code value} with.
 * @param value The long value.
 * @return The updated {@link LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, long value) {
    if (!this.isEnabled) {
        return this;
    }
    addKeyValueInternal(key, value);
    return this;
}
/**
 * Adds a key with a lazily-supplied String value to the context of the log event
 * being built; the supplier is invoked only when the event is serialized.
 * Does nothing when logging is disabled at this level.
 *
 * @param key String key.
 * @param valueSupplier String value supplier function.
 * @return The updated {@code LoggingEventBuilder} object.
 */
public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) {
    if (!this.isEnabled) {
        return this;
    }
    if (this.context == null) {
        this.context = new ArrayList<>();
    }
    this.context.add(new ContextKeyValuePair(key, valueSupplier));
    return this;
}
/**
 * Logs the message annotated with the accumulated context. Events with no message,
 * no per-event context, and no global context are dropped entirely.
 *
 * @param message log message.
 */
public void log(String message) {
    if (!this.isEnabled) {
        return;
    }
    String sanitized = removeNewLinesFromLogMessage(message);
    if (!isEmptyMessage(sanitized)) {
        performLogging(level, getMessageWithContext(sanitized), (Throwable) null);
    }
}
/**
 * Logs the message and throwable annotated with the accumulated context, returning the
 * throwable so it can be rethrown by the caller.
 *
 * @param message log message.
 * @param throwable {@link Throwable} for the message.
 * @param <T> Type of the Throwable being logged.
 *
 * @return The passed {@link Throwable}.
 */
public <T extends Throwable> T log(String message, T throwable) {
    if (this.isEnabled) {
        message = removeNewLinesFromLogMessage(message);
        if (throwable != null) {
            // Expose the exception message as a structured context entry.
            addKeyValueInternal("exception.message", throwable.getMessage());
            // Stack traces are captured only for the default logger and only at debug level.
            if (logger instanceof DefaultLogger && logger.isDebugEnabled()) {
                addKeyValue("exception.stacktrace", getStackTrace(throwable));
            }
        }
        String messageWithContext = getMessageWithContext(message);
        // The raw throwable is forwarded to the logger only when debug is enabled.
        performLogging(level, messageWithContext, logger.isDebugEnabled() ? throwable : null);
    }
    return throwable;
}
/**
 * Renders the throwable's stack trace as a trimmed string.
 */
private String getStackTrace(Throwable t) {
    StringWriter stackTraceWriter = new StringWriter();
    try (PrintWriter printWriter = new PrintWriter(stackTraceWriter)) {
        t.printStackTrace(printWriter);
    }
    return stackTraceWriter.toString().trim();
}
/**
 * Determines whether this event carries nothing worth logging: no message text,
 * no per-event context entries, and no cached global context.
 */
private boolean isEmptyMessage(String message) {
    boolean noContextEntries = CoreUtils.isNullOrEmpty(context);
    return !hasGlobalContext && noContextEntries && CoreUtils.isNullOrEmpty(message);
}
/**
 * Builds the final log line: a JSON object containing the message plus the cached
 * global context and any per-event key/value entries, e.g.
 * {"message":"...","globalKey":...,"eventKey":...}.
 */
private String getMessageWithContext(String message) {
    if (message == null) {
        message = "";
    }
    // Rough capacity guess (~20 bytes per entry) to avoid early buffer growth.
    int speculatedSize = 20 + (context == null ? 0 : context.size()) * 20 + message.length()
        + globalContextCached.length();
    try (AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(speculatedSize);
        JsonWriter jsonWriter = JsonProviders.createWriter(outputStream)) {
        jsonWriter.writeStartObject()
            .writeStringField("message", message)
            // Flush so the writer's output is in the stream before raw bytes are spliced in below.
            .flush();
        if (hasGlobalContext) {
            // The global context is pre-serialized as a JSON fragment; write it directly
            // to the stream, preceded by the comma the writer does not know it needs.
            outputStream.write(',');
            outputStream.write(globalContextCached.getBytes(StandardCharsets.UTF_8));
        }
        if (context != null) {
            for (ContextKeyValuePair contextKeyValuePair : context) {
                contextKeyValuePair.write(jsonWriter);
            }
        }
        jsonWriter.writeEndObject().flush();
        return outputStream.toString(StandardCharsets.UTF_8);
    } catch (IOException ex) {
        // The stream is in-memory, so IOExceptions are unexpected; surface them unchecked.
        throw new UncheckedIOException(ex);
    }
}
// Lazily creates the context list and appends an eagerly-captured key/value entry.
private void addKeyValueInternal(String key, Object value) {
    List<ContextKeyValuePair> entries = this.context;
    if (entries == null) {
        entries = new ArrayList<>();
        this.context = entries;
    }
    entries.add(new ContextKeyValuePair(key, value));
}
/**
 * Dispatches the already-formatted message (and optional throwable) to the underlying
 * logger method matching the requested level. Other levels are intentionally ignored.
 */
private void performLogging(LogLevel logLevel, String message, Throwable throwable) {
    if (logLevel == LogLevel.VERBOSE) {
        logger.debug(message, throwable);
    } else if (logLevel == LogLevel.INFORMATIONAL) {
        logger.info(message, throwable);
    } else if (logLevel == LogLevel.WARNING) {
        logger.warn(message, throwable);
    } else if (logLevel == LogLevel.ERROR) {
        logger.error(message, throwable);
    }
}
/**
 * A single key/value context entry for a log event. The value is either captured
 * eagerly at call time or produced lazily by a {@link Supplier} that is invoked only
 * when the entry is serialized.
 */
private static final class ContextKeyValuePair {
    private final String key;
    private final Object value;
    private final Supplier<String> valueSupplier;

    ContextKeyValuePair(String key, Object value) {
        this.key = key;
        this.value = value;
        this.valueSupplier = null;
    }

    ContextKeyValuePair(String key, Supplier<String> valueSupplier) {
        this.key = key;
        this.value = null;
        this.valueSupplier = valueSupplier;
    }

    /**
     * Serializes this entry as a "key":value JSON field on the given writer,
     * resolving the lazy supplier now if one was provided.
     */
    void write(JsonWriter jsonWriter) throws IOException {
        Object resolved = (valueSupplier != null) ? valueSupplier.get() : value;
        jsonWriter.writeUntypedField(key, resolved);
    }
}
} | |
Make the `HttpHeaderName` a constant | static boolean shouldResponseBeRetried(int statusCode, boolean isPrimary, HttpResponse response) {
/*
* Retry the request if the server had an error (500), was unavailable (503), or requested a backoff (429),
* or if the secondary was being tried and the resources didn't exist there (404). Only the secondary can retry
* if the resource wasn't found as there may be a delay in replication from the primary.
*/
if (response != null) {
String headerValue = response.getHeaders().getValue(HttpHeaderName.fromString("x-ms-copy-source-error-code"));
if (headerValue != null) {
switch (headerValue) {
case "" + 429:
case "" + 500:
case "" + 503:
return true;
case "" + 404:
return !isPrimary;
default:
break;
}
}
}
return (statusCode == 429 || statusCode == 500 || statusCode == 503)
|| (!isPrimary && statusCode == 404);
} | String headerValue = response.getHeaders().getValue(HttpHeaderName.fromString("x-ms-copy-source-error-code")); | static boolean shouldResponseBeRetried(int statusCode, boolean isPrimary, HttpResponse response) {
/*
* Retry the request if the server had an error (500), was unavailable (503), or requested a backoff (429),
* or if the secondary was being tried and the resources didn't exist there (404). Only the secondary can retry
* if the resource wasn't found as there may be a delay in replication from the primary.
*/
boolean headerRetry = false;
boolean statusCodeRetry = (statusCode == 429 || statusCode == 500 || statusCode == 503) || (!isPrimary && statusCode == 404);
if (response != null && response.getHeaders() != null) {
String headerValue = response.getHeaders().getValue(X_MS_COPY_SOURCE_ERROR_CODE);
if (headerValue != null) {
headerRetry = ("429".equals(headerValue) || "500".equals(headerValue) || "503".equals(headerValue))
|| (!isPrimary && "404".equals(headerValue));
}
}
return statusCodeRetry || headerRetry;
} | class RequestRetryPolicy implements HttpPipelinePolicy {
private static final ClientLogger LOGGER = new ClientLogger(RequestRetryPolicy.class);
private final RequestRetryOptions requestRetryOptions;
/**
 * Constructs the policy using the retry options.
 *
 * @param requestRetryOptions Retry options for the policy.
 */
public RequestRetryPolicy(RequestRetryOptions requestRetryOptions) {
    // NOTE(review): options are not null-checked here — confirm callers never pass null.
    this.requestRetryOptions = requestRetryOptions;
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
boolean considerSecondary = (this.requestRetryOptions.getSecondaryHost() != null)
&& (HttpMethod.GET.equals(context.getHttpRequest().getHttpMethod())
|| HttpMethod.HEAD.equals(context.getHttpRequest().getHttpMethod()));
HttpRequest originalHttpRequest = context.getHttpRequest();
BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
if (requestRetryOptions.getMaxTries() > 1 && originalRequestBody != null
&& !originalRequestBody.isReplayable()) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
return this.attemptSync(context, next, originalHttpRequest, considerSecondary, 1, 1, null);
}
/**
 * Asynchronously sends the request with retry support, starting at attempt 1 against
 * the primary host.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // The secondary host is only eligible for reads (GET/HEAD) and only when one is configured.
    boolean considerSecondary = (this.requestRetryOptions.getSecondaryHost() != null)
        && (HttpMethod.GET.equals(context.getHttpRequest().getHttpMethod())
            || HttpMethod.HEAD.equals(context.getHttpRequest().getHttpMethod()));
    HttpRequest originalHttpRequest = context.getHttpRequest();
    BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
    if (requestRetryOptions.getMaxTries() > 1 && originalRequestBody != null
        && !originalRequestBody.isReplayable()) {
        // Duplicate each emitted buffer so every retry subscription reads from position 0.
        Flux<ByteBuffer> bufferedBody = context.getHttpRequest().getBody().map(ByteBuffer::duplicate);
        context.getHttpRequest().setBody(bufferedBody);
    }
    return this.attemptAsync(context, next, context.getHttpRequest(), considerSecondary, 1, 1, null);
}
/**
* This method actually attempts to send the request and determines if we should attempt again and, if so, how long
* to wait before sending out the next request.
* <p>
* Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure or
* an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against primary;
* even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) If
* secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying against a
* secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
*
* @param context The request to try.
* @param next The next policy to apply to the request.
* @param originalRequest The unmodified original request.
* @param considerSecondary Before each try, we'll select either the primary or secondary URL if appropriate.
* @param primaryTry Number of attempts against the primary DC.
* @param attempt This indicates the total number of attempts to send the request.
* @param suppressed The list of throwables that has been suppressed.
* @return A single containing either the successful response or an error that was not retryable because either the
* {@code maxTries} was exceeded or retries will not mitigate the issue.
*/
private Mono<HttpResponse> attemptAsync(HttpPipelineCallContext context, HttpPipelineNextPolicy next,
    HttpRequest originalRequest, boolean considerSecondary, int primaryTry, int attempt,
    List<Throwable> suppressed) {
    // Odd attempts (and all attempts when the secondary is not considered) target the primary.
    final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
    long delayMs = getDelayMs(primaryTry, tryingPrimary);
    // Each attempt works on a fresh copy of the original request.
    context.setHttpRequest(originalRequest.copy());
    try {
        updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
    } catch (IllegalArgumentException e) {
        return Mono.error(e);
    }
    updateRetryCountContext(context, attempt);
    resetProgress(context);
    Mono<HttpResponse> responseMono = next.clone().process();
    // Integer.MAX_VALUE seconds is the sentinel for "no per-try timeout".
    if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
        responseMono = responseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
    }
    // Apply the backoff before the attempt is even subscribed to.
    if (delayMs > 0) {
        responseMono = responseMono.delaySubscription(Duration.ofMillis(delayMs));
    }
    return responseMono.flatMap(response -> {
        boolean newConsiderSecondary = considerSecondary;
        int statusCode = response.getStatusCode();
        boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
        // A 404 from the secondary means the resource hasn't replicated; stop trying it.
        if (!tryingPrimary && statusCode == 404) {
            newConsiderSecondary = false;
        }
        if (retry && attempt < requestRetryOptions.getMaxTries()) {
            /*
             * We increment primaryTry if we are about to try the primary again (which is when we consider the
             * secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
             * secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
             * calculate the delay.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Drain and close the failed response's body before retrying.
            Flux<ByteBuffer> responseBody = response.getBody();
            response.close();
            if (responseBody == null) {
                return attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                    attempt + 1, suppressed);
            } else {
                return responseBody
                    .ignoreElements()
                    .then(attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                        attempt + 1, suppressed));
            }
        }
        return Mono.just(response);
    }).onErrorResume(throwable -> {
        /*
         * It is likely that many users will not realize that their Flux must be replayable and get an error upon
         * retries when the provided data length does not match the length of the exact data. We cannot enforce the
         * desired Flux behavior, so we provide a hint when this is likely the root cause.
         */
        if (throwable instanceof IllegalStateException && attempt > 1) {
            return Mono.error(new IllegalStateException("The request failed because the size of the contents of "
                + "the provided Flux did not match the provided data size upon attempting to retry. This is likely "
                + "caused by the Flux not being replayable. To support retries, all Fluxes must produce the same "
                + "data for each subscriber. Please ensure this behavior.", throwable));
        }
        /*
         * IOException is a catch-all for IO related errors. Technically it includes many types which may not be
         * network exceptions, but we should not hit those unless there is a bug in our logic. In either case, it is
         * better to optimistically retry instead of failing too soon. A Timeout Exception is a client-side timeout
         * coming from Rx.
         */
        ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
            requestRetryOptions.getMaxTries());
        if (exceptionRetryStatus.canBeRetried) {
            /*
             * We increment primaryTry if we are about to try the primary again (which is when we consider the
             * secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
             * secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
             * calculate the delay.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Accumulate the per-attempt failure so the terminal error carries the full history.
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
            return attemptAsync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
                suppressedLocal);
        }
        if (suppressed != null) {
            suppressed.forEach(throwable::addSuppressed);
        }
        return Mono.error(throwable);
    });
}
/**
 * Synchronous counterpart of {@code attemptAsync}: applies the retry backoff, sends the
 * request, and recurses while the response status or exception is retryable.
 */
private HttpResponse attemptSync(final HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next,
    final HttpRequest originalRequest, final boolean considerSecondary, final int primaryTry, final int attempt,
    final List<Throwable> suppressed) {
    // Odd attempts (and all attempts when the secondary is not considered) target the primary.
    final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
    long delayMs = getDelayMs(primaryTry, tryingPrimary);
    // Each attempt works on a fresh copy of the original request.
    context.setHttpRequest(originalRequest.copy());
    updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
    updateRetryCountContext(context, attempt);
    resetProgress(context);
    try {
        if (delayMs > 0) {
            try {
                Thread.sleep(delayMs);
            } catch (InterruptedException ie) {
                // Restore the interrupt status so callers up the stack can still observe it.
                Thread.currentThread().interrupt();
                throw LOGGER.logExceptionAsError(new RuntimeException(ie));
            }
        }
        /*
         * We want to send the request with a given timeout, but we don't want to kickoff that timeout-bound
         * operation until after the retry backoff delay, so we call delaySubscription.
         */
        Mono<HttpResponse> httpResponseMono = Mono.fromCallable(() -> next.clone().processSync());
        // Integer.MAX_VALUE seconds is the sentinel for "no per-try timeout".
        if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
            httpResponseMono = httpResponseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
        }
        HttpResponse response = httpResponseMono.block();
        boolean newConsiderSecondary = considerSecondary;
        int statusCode = response.getStatusCode();
        boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
        // A 404 from the secondary means the resource hasn't replicated; stop trying it.
        if (!tryingPrimary && statusCode == 404) {
            newConsiderSecondary = false;
        }
        if (retry && attempt < requestRetryOptions.getMaxTries()) {
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Drain the failed response's body before closing and retrying.
            if (response.getBody() != null) {
                response.getBodyAsBinaryData().toByteBuffer();
            }
            response.close();
            return attemptSync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                attempt + 1, suppressed);
        }
        return response;
    } catch (RuntimeException throwable) {
        /*
         * It is likely that many users will not realize that their Flux must be replayable and get an error upon
         * retries when the provided data length does not match the length of the exact data. We cannot enforce the
         * desired Flux behavior, so we provide a hint when this is likely the root cause.
         */
        if (throwable instanceof IllegalStateException && attempt > 1) {
            throw LOGGER.logExceptionAsError((new IllegalStateException("The request failed because the size of the contents of "
                + "the provided data did not match the provided data size upon attempting to retry. This is likely "
                + "caused by the data not being replayable. To support retries, all Fluxes must produce the same "
                + "data for each subscriber. Please ensure this behavior.", throwable)));
        }
        ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
            requestRetryOptions.getMaxTries());
        if (exceptionRetryStatus.canBeRetried) {
            /*
             * We increment primaryTry if we are about to try the primary again (which is when we consider the
             * secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
             * secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
             * calculate the delay.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Accumulate the per-attempt failure so the terminal error carries the full history.
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
            return attemptSync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
                suppressedLocal);
        }
        if (suppressed != null) {
            suppressed.forEach(throwable::addSuppressed);
        }
        throw LOGGER.logExceptionAsError(throwable);
    }
}
/*
 * Update the RETRY_COUNT_CONTEXT to log retries.
 */
private static void updateRetryCountContext(HttpPipelineCallContext context, int attempt) {
    // Stored under the logging policy's retry-count key so the attempt number can be logged.
    context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, attempt);
}
// A retried attempt re-sends the body from the start, so any progress reported for the
// failed attempt is rolled back before the next try.
private static void resetProgress(HttpPipelineCallContext context) {
    ProgressReporter progressReporter = Contexts.with(context.getContext()).getHttpRequestProgressReporter();
    if (progressReporter != null) {
        progressReporter.reset();
    }
}
/*
 * Update secondary host on request URL if not trying primary URL.
 */
private static void updateUrlToSecondaryHost(boolean tryingPrimary, String secondaryHost, HttpPipelineCallContext context) {
    if (!tryingPrimary) {
        // Swap only the host portion of the request URL; path/query are preserved.
        UrlBuilder builder = UrlBuilder.parse(context.getHttpRequest().getUrl());
        builder.setHost(secondaryHost);
        try {
            context.getHttpRequest().setUrl(builder.toUrl());
        } catch (MalformedURLException e) {
            // A bad secondary host is surfaced as IllegalArgumentException after being logged.
            throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'url' must be a valid URL", e));
        }
    }
}
/*
 * Decides whether a thrown error is retryable. IOException and TimeoutException —
 * whether the unwrapped error itself or anywhere in its cause chain — are treated as
 * transient; anything else, or an exhausted attempt budget, ends the retry loop.
 */
static ExceptionRetryStatus shouldErrorBeRetried(Throwable error, int attempt, int maxAttempts) {
    Throwable unwrapped = Exceptions.unwrap(error);
    if (attempt < maxAttempts) {
        // Walk the unwrapped error and its causes looking for a transient failure type.
        for (Throwable current = unwrapped; current != null; current = current.getCause()) {
            if (current instanceof IOException || current instanceof TimeoutException) {
                return new ExceptionRetryStatus(true, unwrapped);
            }
        }
    }
    return new ExceptionRetryStatus(false, unwrapped);
}
/*
 * Immutable pairing of a retry decision with the error it was made for, produced by
 * shouldErrorBeRetried.
 */
static final class ExceptionRetryStatus {
    // Whether the associated error should trigger another attempt.
    final boolean canBeRetried;
    // The error after Exceptions.unwrap, kept so it can later be recorded as a suppressed throwable.
    final Throwable unwrappedThrowable;
    ExceptionRetryStatus(boolean canBeRetried, Throwable unwrappedThrowable) {
        this.canBeRetried = canBeRetried;
        this.unwrappedThrowable = unwrappedThrowable;
    }
}
/*
 * Computes the backoff before the next attempt: primary attempts follow the configured
 * retry policy for the given primary-try count, while secondary attempts wait a short
 * randomized delay of roughly 0.8-1.3 seconds regardless of the attempt number.
 */
private long getDelayMs(int primaryTry, boolean tryingPrimary) {
    return tryingPrimary
        ? this.requestRetryOptions.calculateDelayInMs(primaryTry)
        : (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000);
}
/*
 * Returns the primary-try count to carry into the next attempt. The count advances only
 * when the next attempt will hit the primary again: either the secondary was just tried
 * (tryingPrimary == false) or the secondary is not in play at all
 * (considerSecondary == false). This keeps the value fed to getDelayMs accurate.
 */
private static int getNewPrimaryTry(boolean considerSecondary, int primaryTry, boolean tryingPrimary) {
    boolean nextAttemptHitsPrimary = !tryingPrimary || !considerSecondary;
    return nextAttemptHitsPrimary ? primaryTry + 1 : primaryTry;
}
} | class RequestRetryPolicy implements HttpPipelinePolicy {
// Logger used to surface retry exhaustion and URL-rewrite failures.
private static final ClientLogger LOGGER = new ClientLogger(RequestRetryPolicy.class);
// Retry configuration: max tries, per-try timeout, backoff delays, optional secondary host.
private final RequestRetryOptions requestRetryOptions;
// Header carrying the copy-source error code. NOTE(review): referenced by the retry
// decision logic (shouldResponseBeRetried), which is not visible in this excerpt.
private static final HttpHeaderName X_MS_COPY_SOURCE_ERROR_CODE = HttpHeaderName.fromString("x-ms-copy-source-error-code");
/**
 * Constructs the policy using the retry options.
 * <p>
 * NOTE(review): the options are stored without a null check; a null value would only
 * fail later, on first use in {@code process}/{@code processSync} — confirm whether
 * callers may legitimately pass null before adding an eager check.
 *
 * @param requestRetryOptions Retry options for the policy.
 */
public RequestRetryPolicy(RequestRetryOptions requestRetryOptions) {
    this.requestRetryOptions = requestRetryOptions;
}
/*
 * Synchronous pipeline entry point. Decides whether the secondary host may be consulted
 * (only read-only GET/HEAD requests with a configured secondary), makes the request body
 * replayable when more than one try is possible, and starts the first attempt.
 */
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    HttpRequest originalHttpRequest = context.getHttpRequest();
    HttpMethod method = originalHttpRequest.getHttpMethod();
    boolean readOnlyRequest = HttpMethod.GET.equals(method) || HttpMethod.HEAD.equals(method);
    boolean considerSecondary = this.requestRetryOptions.getSecondaryHost() != null && readOnlyRequest;
    BinaryData requestBody = originalHttpRequest.getBodyAsBinaryData();
    if (requestRetryOptions.getMaxTries() > 1 && requestBody != null && !requestBody.isReplayable()) {
        originalHttpRequest.setBody(requestBody.toReplayableBinaryData());
    }
    return this.attemptSync(context, next, originalHttpRequest, considerSecondary, 1, 1, null);
}
/*
 * Asynchronous pipeline entry point. Mirrors processSync, but protects the reactive
 * request body by mapping each emitted ByteBuffer through duplicate(), giving every
 * subscription independent position/limit state.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    HttpRequest originalHttpRequest = context.getHttpRequest();
    HttpMethod method = originalHttpRequest.getHttpMethod();
    boolean readOnlyRequest = HttpMethod.GET.equals(method) || HttpMethod.HEAD.equals(method);
    boolean considerSecondary = this.requestRetryOptions.getSecondaryHost() != null && readOnlyRequest;
    BinaryData requestBody = originalHttpRequest.getBodyAsBinaryData();
    if (requestRetryOptions.getMaxTries() > 1 && requestBody != null && !requestBody.isReplayable()) {
        originalHttpRequest.setBody(originalHttpRequest.getBody().map(ByteBuffer::duplicate));
    }
    return this.attemptAsync(context, next, context.getHttpRequest(), considerSecondary, 1, 1, null);
}
/**
 * This method actually attempts to send the request and determines if we should attempt again and, if so, how long
 * to wait before sending out the next request.
 * <p>
 * Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2). When to retry: connection failure or
 * an HTTP status code of 500 or greater, except 501 and 505. If using a secondary: odd tries go against the primary;
 * even tries go against the secondary. For a primary try, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2).
 * If the secondary gets a 404, don't fail, retry, but future retries are only against the primary. When retrying
 * against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)).
 * <p>
 * NOTE(review): getDelayMs actually waits about 1 second * random(0.8, 1.3) for secondary tries, not .1 second —
 * confirm which of doc or code is intended.
 *
 * @param context The request to try.
 * @param next The next policy to apply to the request.
 * @param originalRequest The unmodified original request.
 * @param considerSecondary Before each try, we'll select either the primary or secondary URL if appropriate.
 * @param primaryTry Number of attempts against the primary DC.
 * @param attempt This indicates the total number of attempts to send the request.
 * @param suppressed The list of throwables that has been suppressed.
 * @return A single containing either the successful response or an error that was not retryable because either the
 * {@code maxTries} was exceeded or retries will not mitigate the issue.
 */
private Mono<HttpResponse> attemptAsync(HttpPipelineCallContext context, HttpPipelineNextPolicy next,
    HttpRequest originalRequest, boolean considerSecondary, int primaryTry, int attempt,
    List<Throwable> suppressed) {
    // Odd attempts (1, 3, ...) target the primary; even attempts use the secondary when allowed.
    final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
    long delayMs = getDelayMs(primaryTry, tryingPrimary);
    // Each attempt operates on a fresh copy so the original request stays unmodified.
    context.setHttpRequest(originalRequest.copy());
    try {
        updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
    } catch (IllegalArgumentException e) {
        // The secondary host produced an invalid URL; surface it through the reactive chain.
        return Mono.error(e);
    }
    updateRetryCountContext(context, attempt);
    resetProgress(context);
    Mono<HttpResponse> responseMono = next.clone().process();
    // Integer.MAX_VALUE seconds acts as the "no timeout" sentinel; otherwise bound each try.
    if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
        responseMono = responseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
    }
    if (delayMs > 0) {
        // Backoff is applied before subscription so the per-try timeout does not cover the delay.
        responseMono = responseMono.delaySubscription(Duration.ofMillis(delayMs));
    }
    return responseMono.flatMap(response -> {
        boolean newConsiderSecondary = considerSecondary;
        int statusCode = response.getStatusCode();
        boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
        // A 404 from the secondary means replication lag; stop consulting the secondary.
        if (!tryingPrimary && statusCode == 404) {
            newConsiderSecondary = false;
        }
        if (retry && attempt < requestRetryOptions.getMaxTries()) {
            /*
             * We increment primaryTry if we are about to try the primary again (which is when we consider the
             * secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
             * secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
             * calculate the delay.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Drain the unretained body before retrying so its resources are released.
            Flux<ByteBuffer> responseBody = response.getBody();
            response.close();
            if (responseBody == null) {
                return attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                    attempt + 1, suppressed);
            } else {
                return responseBody
                    .ignoreElements()
                    .then(attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                        attempt + 1, suppressed));
            }
        }
        return Mono.just(response);
    }).onErrorResume(throwable -> {
        /*
         * It is likely that many users will not realize that their Flux must be replayable and get an error upon
         * retries when the provided data length does not match the length of the exact data. We cannot enforce the
         * desired Flux behavior, so we provide a hint when this is likely the root cause.
         */
        if (throwable instanceof IllegalStateException && attempt > 1) {
            return Mono.error(new IllegalStateException("The request failed because the size of the contents of "
                + "the provided Flux did not match the provided data size upon attempting to retry. This is likely "
                + "caused by the Flux not being replayable. To support retries, all Fluxes must produce the same "
                + "data for each subscriber. Please ensure this behavior.", throwable));
        }
        /*
         * IOException is a catch-all for IO related errors. Technically it includes many types which may not be
         * network exceptions, but we should not hit those unless there is a bug in our logic. In either case, it is
         * better to optimistically retry instead of failing too soon. A Timeout Exception is a client-side timeout
         * coming from Rx.
         */
        ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
            requestRetryOptions.getMaxTries());
        if (exceptionRetryStatus.canBeRetried) {
            // Bump primaryTry when the primary is next, and remember this failure so it can be
            // attached as suppressed if the retry budget is ultimately exhausted.
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
            return attemptAsync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
                suppressedLocal);
        }
        if (suppressed != null) {
            suppressed.forEach(throwable::addSuppressed);
        }
        return Mono.error(throwable);
    });
}
/*
 * Synchronous counterpart of attemptAsync: sends the request, optionally after a backoff
 * sleep, and recurses for another attempt when the response or error is retryable.
 *
 * Fix: the InterruptedException handler now restores the thread's interrupt flag before
 * rethrowing, so callers and thread pools can still observe the interruption
 * (previously the flag was silently cleared by the catch).
 */
private HttpResponse attemptSync(final HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next,
    final HttpRequest originalRequest, final boolean considerSecondary, final int primaryTry, final int attempt,
    final List<Throwable> suppressed) {
    // Odd attempts target the primary; even attempts use the secondary when allowed.
    final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
    long delayMs = getDelayMs(primaryTry, tryingPrimary);
    // Each attempt operates on a fresh copy so the original request stays unmodified.
    context.setHttpRequest(originalRequest.copy());
    updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
    updateRetryCountContext(context, attempt);
    resetProgress(context);
    try {
        if (delayMs > 0) {
            try {
                Thread.sleep(delayMs);
            } catch (InterruptedException ie) {
                // Restore the interrupt status so the interruption is not swallowed.
                Thread.currentThread().interrupt();
                throw LOGGER.logExceptionAsError(new RuntimeException(ie));
            }
        }
        /*
         * The try runs through a Mono solely to reuse Reactor's timeout support for the
         * per-try time limit; Integer.MAX_VALUE seconds is the "no timeout" sentinel.
         */
        Mono<HttpResponse> httpResponseMono = Mono.fromCallable(() -> next.clone().processSync());
        if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
            httpResponseMono = httpResponseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
        }
        HttpResponse response = httpResponseMono.block();
        boolean newConsiderSecondary = considerSecondary;
        int statusCode = response.getStatusCode();
        boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
        // A 404 from the secondary means replication lag; stop consulting the secondary.
        if (!tryingPrimary && statusCode == 404) {
            newConsiderSecondary = false;
        }
        if (retry && attempt < requestRetryOptions.getMaxTries()) {
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Buffer the body before closing the response — presumably to allow the
            // connection to be reused; original behavior kept.
            if (response.getBody() != null) {
                response.getBodyAsBinaryData().toByteBuffer();
            }
            response.close();
            return attemptSync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                attempt + 1, suppressed);
        }
        return response;
    } catch (RuntimeException throwable) {
        /*
         * A non-replayable request body commonly surfaces as IllegalStateException on the
         * second and later attempts; provide a hint about the likely root cause.
         */
        if (throwable instanceof IllegalStateException && attempt > 1) {
            throw LOGGER.logExceptionAsError((new IllegalStateException("The request failed because the size of the contents of "
                + "the provided data did not match the provided data size upon attempting to retry. This is likely "
                + "caused by the data not being replayable. To support retries, all Fluxes must produce the same "
                + "data for each subscriber. Please ensure this behavior.", throwable)));
        }
        ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
            requestRetryOptions.getMaxTries());
        if (exceptionRetryStatus.canBeRetried) {
            /*
             * We increment primaryTry if we are about to try the primary again (tryingPrimary==false or
             * considerSecondary==false) so the value passed to getDelayMs stays correct, and record this
             * failure so it can be attached as suppressed if the retry budget is ultimately exhausted.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
            return attemptSync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
                suppressedLocal);
        }
        if (suppressed != null) {
            suppressed.forEach(throwable::addSuppressed);
        }
        throw LOGGER.logExceptionAsError(throwable);
    }
}
/*
 * Records the current attempt number under HttpLoggingPolicy.RETRY_COUNT_CONTEXT so the
 * logging policy can report how many tries this request has made.
 */
private static void updateRetryCountContext(HttpPipelineCallContext context, int attempt) {
    context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, attempt);
}
/*
 * Rewinds any progress reporting attached to this request so a retried attempt
 * starts counting transferred bytes from zero again. No-op when no reporter is set.
 */
private static void resetProgress(HttpPipelineCallContext context) {
    ProgressReporter reporter = Contexts.with(context.getContext()).getHttpRequestProgressReporter();
    if (reporter == null) {
        return;
    }
    reporter.reset();
}
/*
 * Points the outgoing request at the secondary host when this attempt targets the
 * secondary; primary attempts leave the URL untouched. An unparsable rewritten URL
 * is surfaced as IllegalArgumentException.
 */
private static void updateUrlToSecondaryHost(boolean tryingPrimary, String secondaryHost, HttpPipelineCallContext context) {
    if (tryingPrimary) {
        return;
    }
    UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl());
    urlBuilder.setHost(secondaryHost);
    try {
        context.getHttpRequest().setUrl(urlBuilder.toUrl());
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'url' must be a valid URL", e));
    }
}
/*
 * Decides whether a thrown error is retryable. IOException and TimeoutException —
 * whether the unwrapped error itself or anywhere in its cause chain — are treated as
 * transient; anything else, or an exhausted attempt budget, ends the retry loop.
 */
static ExceptionRetryStatus shouldErrorBeRetried(Throwable error, int attempt, int maxAttempts) {
    Throwable unwrapped = Exceptions.unwrap(error);
    if (attempt < maxAttempts) {
        // Walk the unwrapped error and its causes looking for a transient failure type.
        for (Throwable current = unwrapped; current != null; current = current.getCause()) {
            if (current instanceof IOException || current instanceof TimeoutException) {
                return new ExceptionRetryStatus(true, unwrapped);
            }
        }
    }
    return new ExceptionRetryStatus(false, unwrapped);
}
/*
 * Immutable pairing of a retry decision with the error it was made for, produced by
 * shouldErrorBeRetried.
 */
static final class ExceptionRetryStatus {
    // Whether the associated error should trigger another attempt.
    final boolean canBeRetried;
    // The error after Exceptions.unwrap, kept so it can later be recorded as a suppressed throwable.
    final Throwable unwrappedThrowable;
    ExceptionRetryStatus(boolean canBeRetried, Throwable unwrappedThrowable) {
        this.canBeRetried = canBeRetried;
        this.unwrappedThrowable = unwrappedThrowable;
    }
}
/*
 * Computes the backoff before the next attempt: primary attempts follow the configured
 * retry policy for the given primary-try count, while secondary attempts wait a short
 * randomized delay of roughly 0.8-1.3 seconds regardless of the attempt number.
 */
private long getDelayMs(int primaryTry, boolean tryingPrimary) {
    return tryingPrimary
        ? this.requestRetryOptions.calculateDelayInMs(primaryTry)
        : (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000);
}
/*
 * Returns the primary-try count to carry into the next attempt. The count advances only
 * when the next attempt will hit the primary again: either the secondary was just tried
 * (tryingPrimary == false) or the secondary is not in play at all
 * (considerSecondary == false). This keeps the value fed to getDelayMs accurate.
 */
private static int getNewPrimaryTry(boolean considerSecondary, int primaryTry, boolean tryingPrimary) {
    boolean nextAttemptHitsPrimary = !tryingPrimary || !considerSecondary;
    return nextAttemptHitsPrimary ? primaryTry + 1 : primaryTry;
}
} |
This should compute a boolean value, also check the response status code, and return the OR of the two. That would cover the edge case where the header value is non-retriable but the status code is retriable. | static boolean shouldResponseBeRetried(int statusCode, boolean isPrimary, HttpResponse response) {
/*
* Retry the request if the server had an error (500), was unavailable (503), or requested a backoff (429),
* or if the secondary was being tried and the resources didn't exist there (404). Only the secondary can retry
* if the resource wasn't found as there may be a delay in replication from the primary.
*/
if (response != null) {
String headerValue = response.getHeaders().getValue(HttpHeaderName.fromString("x-ms-copy-source-error-code"));
if (headerValue != null) {
switch (headerValue) {
case "" + 429:
case "" + 500:
case "" + 503:
return true;
case "" + 404:
return !isPrimary;
default:
break;
}
}
}
return (statusCode == 429 || statusCode == 500 || statusCode == 503)
|| (!isPrimary && statusCode == 404);
} | } | static boolean shouldResponseBeRetried(int statusCode, boolean isPrimary, HttpResponse response) {
/*
* Retry the request if the server had an error (500), was unavailable (503), or requested a backoff (429),
* or if the secondary was being tried and the resources didn't exist there (404). Only the secondary can retry
* if the resource wasn't found as there may be a delay in replication from the primary.
*/
boolean headerRetry = false;
boolean statusCodeRetry = (statusCode == 429 || statusCode == 500 || statusCode == 503) || (!isPrimary && statusCode == 404);
if (response != null && response.getHeaders() != null) {
String headerValue = response.getHeaders().getValue(X_MS_COPY_SOURCE_ERROR_CODE);
if (headerValue != null) {
headerRetry = ("429".equals(headerValue) || "500".equals(headerValue) || "503".equals(headerValue))
|| (!isPrimary && "404".equals(headerValue));
}
}
return statusCodeRetry || headerRetry;
} | class RequestRetryPolicy implements HttpPipelinePolicy {
// Logger used to surface retry exhaustion and URL-rewrite failures.
private static final ClientLogger LOGGER = new ClientLogger(RequestRetryPolicy.class);
// Retry configuration: max tries, per-try timeout, backoff delays, optional secondary host.
private final RequestRetryOptions requestRetryOptions;
/**
 * Constructs the policy using the retry options.
 * <p>
 * NOTE(review): the options are stored without a null check; a null value would only
 * fail later, on first use in {@code process}/{@code processSync} — confirm whether
 * callers may legitimately pass null before adding an eager check.
 *
 * @param requestRetryOptions Retry options for the policy.
 */
public RequestRetryPolicy(RequestRetryOptions requestRetryOptions) {
    this.requestRetryOptions = requestRetryOptions;
}
/*
 * Synchronous pipeline entry point. Decides whether the secondary host may be consulted
 * (only read-only GET/HEAD requests with a configured secondary), makes the request body
 * replayable when more than one try is possible, and starts the first attempt.
 */
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    HttpRequest originalHttpRequest = context.getHttpRequest();
    HttpMethod method = originalHttpRequest.getHttpMethod();
    boolean readOnlyRequest = HttpMethod.GET.equals(method) || HttpMethod.HEAD.equals(method);
    boolean considerSecondary = this.requestRetryOptions.getSecondaryHost() != null && readOnlyRequest;
    BinaryData requestBody = originalHttpRequest.getBodyAsBinaryData();
    if (requestRetryOptions.getMaxTries() > 1 && requestBody != null && !requestBody.isReplayable()) {
        originalHttpRequest.setBody(requestBody.toReplayableBinaryData());
    }
    return this.attemptSync(context, next, originalHttpRequest, considerSecondary, 1, 1, null);
}
/*
 * Asynchronous pipeline entry point. Mirrors processSync, but protects the reactive
 * request body by mapping each emitted ByteBuffer through duplicate(), giving every
 * subscription independent position/limit state.
 */
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    HttpRequest originalHttpRequest = context.getHttpRequest();
    HttpMethod method = originalHttpRequest.getHttpMethod();
    boolean readOnlyRequest = HttpMethod.GET.equals(method) || HttpMethod.HEAD.equals(method);
    boolean considerSecondary = this.requestRetryOptions.getSecondaryHost() != null && readOnlyRequest;
    BinaryData requestBody = originalHttpRequest.getBodyAsBinaryData();
    if (requestRetryOptions.getMaxTries() > 1 && requestBody != null && !requestBody.isReplayable()) {
        originalHttpRequest.setBody(originalHttpRequest.getBody().map(ByteBuffer::duplicate));
    }
    return this.attemptAsync(context, next, context.getHttpRequest(), considerSecondary, 1, 1, null);
}
/**
 * This method actually attempts to send the request and determines if we should attempt again and, if so, how long
 * to wait before sending out the next request.
 * <p>
 * Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2). When to retry: connection failure or
 * an HTTP status code of 500 or greater, except 501 and 505. If using a secondary: odd tries go against the primary;
 * even tries go against the secondary. For a primary try, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2).
 * If the secondary gets a 404, don't fail, retry, but future retries are only against the primary. When retrying
 * against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)).
 * <p>
 * NOTE(review): getDelayMs actually waits about 1 second * random(0.8, 1.3) for secondary tries, not .1 second —
 * confirm which of doc or code is intended.
 *
 * @param context The request to try.
 * @param next The next policy to apply to the request.
 * @param originalRequest The unmodified original request.
 * @param considerSecondary Before each try, we'll select either the primary or secondary URL if appropriate.
 * @param primaryTry Number of attempts against the primary DC.
 * @param attempt This indicates the total number of attempts to send the request.
 * @param suppressed The list of throwables that has been suppressed.
 * @return A single containing either the successful response or an error that was not retryable because either the
 * {@code maxTries} was exceeded or retries will not mitigate the issue.
 */
private Mono<HttpResponse> attemptAsync(HttpPipelineCallContext context, HttpPipelineNextPolicy next,
    HttpRequest originalRequest, boolean considerSecondary, int primaryTry, int attempt,
    List<Throwable> suppressed) {
    // Odd attempts (1, 3, ...) target the primary; even attempts use the secondary when allowed.
    final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
    long delayMs = getDelayMs(primaryTry, tryingPrimary);
    // Each attempt operates on a fresh copy so the original request stays unmodified.
    context.setHttpRequest(originalRequest.copy());
    try {
        updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
    } catch (IllegalArgumentException e) {
        // The secondary host produced an invalid URL; surface it through the reactive chain.
        return Mono.error(e);
    }
    updateRetryCountContext(context, attempt);
    resetProgress(context);
    Mono<HttpResponse> responseMono = next.clone().process();
    // Integer.MAX_VALUE seconds acts as the "no timeout" sentinel; otherwise bound each try.
    if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
        responseMono = responseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
    }
    if (delayMs > 0) {
        // Backoff is applied before subscription so the per-try timeout does not cover the delay.
        responseMono = responseMono.delaySubscription(Duration.ofMillis(delayMs));
    }
    return responseMono.flatMap(response -> {
        boolean newConsiderSecondary = considerSecondary;
        int statusCode = response.getStatusCode();
        boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
        // A 404 from the secondary means replication lag; stop consulting the secondary.
        if (!tryingPrimary && statusCode == 404) {
            newConsiderSecondary = false;
        }
        if (retry && attempt < requestRetryOptions.getMaxTries()) {
            /*
             * We increment primaryTry if we are about to try the primary again (which is when we consider the
             * secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
             * secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
             * calculate the delay.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Drain the unretained body before retrying so its resources are released.
            Flux<ByteBuffer> responseBody = response.getBody();
            response.close();
            if (responseBody == null) {
                return attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                    attempt + 1, suppressed);
            } else {
                return responseBody
                    .ignoreElements()
                    .then(attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                        attempt + 1, suppressed));
            }
        }
        return Mono.just(response);
    }).onErrorResume(throwable -> {
        /*
         * It is likely that many users will not realize that their Flux must be replayable and get an error upon
         * retries when the provided data length does not match the length of the exact data. We cannot enforce the
         * desired Flux behavior, so we provide a hint when this is likely the root cause.
         */
        if (throwable instanceof IllegalStateException && attempt > 1) {
            return Mono.error(new IllegalStateException("The request failed because the size of the contents of "
                + "the provided Flux did not match the provided data size upon attempting to retry. This is likely "
                + "caused by the Flux not being replayable. To support retries, all Fluxes must produce the same "
                + "data for each subscriber. Please ensure this behavior.", throwable));
        }
        /*
         * IOException is a catch-all for IO related errors. Technically it includes many types which may not be
         * network exceptions, but we should not hit those unless there is a bug in our logic. In either case, it is
         * better to optimistically retry instead of failing too soon. A Timeout Exception is a client-side timeout
         * coming from Rx.
         */
        ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
            requestRetryOptions.getMaxTries());
        if (exceptionRetryStatus.canBeRetried) {
            // Bump primaryTry when the primary is next, and remember this failure so it can be
            // attached as suppressed if the retry budget is ultimately exhausted.
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
            return attemptAsync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
                suppressedLocal);
        }
        if (suppressed != null) {
            suppressed.forEach(throwable::addSuppressed);
        }
        return Mono.error(throwable);
    });
}
/*
 * Synchronous counterpart of attemptAsync: sends the request, optionally after a backoff
 * sleep, and recurses for another attempt when the response or error is retryable.
 *
 * Fix: the InterruptedException handler now restores the thread's interrupt flag before
 * rethrowing, so callers and thread pools can still observe the interruption
 * (previously the flag was silently cleared by the catch).
 */
private HttpResponse attemptSync(final HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next,
    final HttpRequest originalRequest, final boolean considerSecondary, final int primaryTry, final int attempt,
    final List<Throwable> suppressed) {
    // Odd attempts target the primary; even attempts use the secondary when allowed.
    final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
    long delayMs = getDelayMs(primaryTry, tryingPrimary);
    // Each attempt operates on a fresh copy so the original request stays unmodified.
    context.setHttpRequest(originalRequest.copy());
    updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
    updateRetryCountContext(context, attempt);
    resetProgress(context);
    try {
        if (delayMs > 0) {
            try {
                Thread.sleep(delayMs);
            } catch (InterruptedException ie) {
                // Restore the interrupt status so the interruption is not swallowed.
                Thread.currentThread().interrupt();
                throw LOGGER.logExceptionAsError(new RuntimeException(ie));
            }
        }
        /*
         * The try runs through a Mono solely to reuse Reactor's timeout support for the
         * per-try time limit; Integer.MAX_VALUE seconds is the "no timeout" sentinel.
         */
        Mono<HttpResponse> httpResponseMono = Mono.fromCallable(() -> next.clone().processSync());
        if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
            httpResponseMono = httpResponseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
        }
        HttpResponse response = httpResponseMono.block();
        boolean newConsiderSecondary = considerSecondary;
        int statusCode = response.getStatusCode();
        boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
        // A 404 from the secondary means replication lag; stop consulting the secondary.
        if (!tryingPrimary && statusCode == 404) {
            newConsiderSecondary = false;
        }
        if (retry && attempt < requestRetryOptions.getMaxTries()) {
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Buffer the body before closing the response — presumably to allow the
            // connection to be reused; original behavior kept.
            if (response.getBody() != null) {
                response.getBodyAsBinaryData().toByteBuffer();
            }
            response.close();
            return attemptSync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                attempt + 1, suppressed);
        }
        return response;
    } catch (RuntimeException throwable) {
        /*
         * A non-replayable request body commonly surfaces as IllegalStateException on the
         * second and later attempts; provide a hint about the likely root cause.
         */
        if (throwable instanceof IllegalStateException && attempt > 1) {
            throw LOGGER.logExceptionAsError((new IllegalStateException("The request failed because the size of the contents of "
                + "the provided data did not match the provided data size upon attempting to retry. This is likely "
                + "caused by the data not being replayable. To support retries, all Fluxes must produce the same "
                + "data for each subscriber. Please ensure this behavior.", throwable)));
        }
        ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
            requestRetryOptions.getMaxTries());
        if (exceptionRetryStatus.canBeRetried) {
            /*
             * We increment primaryTry if we are about to try the primary again (tryingPrimary==false or
             * considerSecondary==false) so the value passed to getDelayMs stays correct, and record this
             * failure so it can be attached as suppressed if the retry budget is ultimately exhausted.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
            return attemptSync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
                suppressedLocal);
        }
        if (suppressed != null) {
            suppressed.forEach(throwable::addSuppressed);
        }
        throw LOGGER.logExceptionAsError(throwable);
    }
}
/*
 * Records the current attempt number under HttpLoggingPolicy.RETRY_COUNT_CONTEXT so the
 * logging policy can report how many tries this request has made.
 */
private static void updateRetryCountContext(HttpPipelineCallContext context, int attempt) {
    context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, attempt);
}
/*
 * Rewinds any progress reporting attached to this request so a retried attempt
 * starts counting transferred bytes from zero again. No-op when no reporter is set.
 */
private static void resetProgress(HttpPipelineCallContext context) {
    ProgressReporter reporter = Contexts.with(context.getContext()).getHttpRequestProgressReporter();
    if (reporter == null) {
        return;
    }
    reporter.reset();
}
/*
 * Points the outgoing request at the secondary host when this attempt targets the
 * secondary; primary attempts leave the URL untouched. An unparsable rewritten URL
 * is surfaced as IllegalArgumentException.
 */
private static void updateUrlToSecondaryHost(boolean tryingPrimary, String secondaryHost, HttpPipelineCallContext context) {
    if (tryingPrimary) {
        return;
    }
    UrlBuilder urlBuilder = UrlBuilder.parse(context.getHttpRequest().getUrl());
    urlBuilder.setHost(secondaryHost);
    try {
        context.getHttpRequest().setUrl(urlBuilder.toUrl());
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'url' must be a valid URL", e));
    }
}
/*
 * Decides whether a thrown error is retryable. IOException and TimeoutException —
 * whether the unwrapped error itself or anywhere in its cause chain — are treated as
 * transient; anything else, or an exhausted attempt budget, ends the retry loop.
 */
static ExceptionRetryStatus shouldErrorBeRetried(Throwable error, int attempt, int maxAttempts) {
    Throwable unwrapped = Exceptions.unwrap(error);
    if (attempt < maxAttempts) {
        // Walk the unwrapped error and its causes looking for a transient failure type.
        for (Throwable current = unwrapped; current != null; current = current.getCause()) {
            if (current instanceof IOException || current instanceof TimeoutException) {
                return new ExceptionRetryStatus(true, unwrapped);
            }
        }
    }
    return new ExceptionRetryStatus(false, unwrapped);
}
/*
 * Immutable pairing of a retry decision with the error it was made for, produced by
 * shouldErrorBeRetried.
 */
static final class ExceptionRetryStatus {
    // Whether the associated error should trigger another attempt.
    final boolean canBeRetried;
    // The error after Exceptions.unwrap, kept so it can later be recorded as a suppressed throwable.
    final Throwable unwrappedThrowable;
    ExceptionRetryStatus(boolean canBeRetried, Throwable unwrappedThrowable) {
        this.canBeRetried = canBeRetried;
        this.unwrappedThrowable = unwrappedThrowable;
    }
}
/*
 * Computes the backoff before the next attempt: primary attempts follow the configured
 * retry policy for the given primary-try count, while secondary attempts wait a short
 * randomized delay of roughly 0.8-1.3 seconds regardless of the attempt number.
 */
private long getDelayMs(int primaryTry, boolean tryingPrimary) {
    return tryingPrimary
        ? this.requestRetryOptions.calculateDelayInMs(primaryTry)
        : (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000);
}
/*
 * Returns the primary-try count to carry into the next attempt. The count advances only
 * when the next attempt will hit the primary again: either the secondary was just tried
 * (tryingPrimary == false) or the secondary is not in play at all
 * (considerSecondary == false). This keeps the value fed to getDelayMs accurate.
 */
private static int getNewPrimaryTry(boolean considerSecondary, int primaryTry, boolean tryingPrimary) {
    boolean nextAttemptHitsPrimary = !tryingPrimary || !considerSecondary;
    return nextAttemptHitsPrimary ? primaryTry + 1 : primaryTry;
}
} | class RequestRetryPolicy implements HttpPipelinePolicy {
// Logger used to surface retry exhaustion and URL-rewrite failures.
private static final ClientLogger LOGGER = new ClientLogger(RequestRetryPolicy.class);
// Retry configuration: max tries, per-try timeout, backoff delays, optional secondary host.
private final RequestRetryOptions requestRetryOptions;
// Header carrying the copy-source error code. NOTE(review): referenced by the retry
// decision logic (shouldResponseBeRetried), which is not visible in this excerpt.
private static final HttpHeaderName X_MS_COPY_SOURCE_ERROR_CODE = HttpHeaderName.fromString("x-ms-copy-source-error-code");
/**
* Constructs the policy using the retry options.
*
* @param requestRetryOptions Retry options for the policy.
*/
public RequestRetryPolicy(RequestRetryOptions requestRetryOptions) {
this.requestRetryOptions = requestRetryOptions;
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
boolean considerSecondary = (this.requestRetryOptions.getSecondaryHost() != null)
&& (HttpMethod.GET.equals(context.getHttpRequest().getHttpMethod())
|| HttpMethod.HEAD.equals(context.getHttpRequest().getHttpMethod()));
HttpRequest originalHttpRequest = context.getHttpRequest();
BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
if (requestRetryOptions.getMaxTries() > 1 && originalRequestBody != null
&& !originalRequestBody.isReplayable()) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
return this.attemptSync(context, next, originalHttpRequest, considerSecondary, 1, 1, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
boolean considerSecondary = (this.requestRetryOptions.getSecondaryHost() != null)
&& (HttpMethod.GET.equals(context.getHttpRequest().getHttpMethod())
|| HttpMethod.HEAD.equals(context.getHttpRequest().getHttpMethod()));
HttpRequest originalHttpRequest = context.getHttpRequest();
BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
if (requestRetryOptions.getMaxTries() > 1 && originalRequestBody != null
&& !originalRequestBody.isReplayable()) {
Flux<ByteBuffer> bufferedBody = context.getHttpRequest().getBody().map(ByteBuffer::duplicate);
context.getHttpRequest().setBody(bufferedBody);
}
return this.attemptAsync(context, next, context.getHttpRequest(), considerSecondary, 1, 1, null);
}
/**
* This method actually attempts to send the request and determines if we should attempt again and, if so, how long
* to wait before sending out the next request.
* <p>
* Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure or
* an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against primary;
* even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) If
* secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying against a
* secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
*
* @param context The request to try.
* @param next The next policy to apply to the request.
* @param originalRequest The unmodified original request.
* @param considerSecondary Before each try, we'll select either the primary or secondary URL if appropriate.
* @param primaryTry Number of attempts against the primary DC.
* @param attempt This indicates the total number of attempts to send the request.
* @param suppressed The list of throwables that has been suppressed.
* @return A single containing either the successful response or an error that was not retryable because either the
* {@code maxTries} was exceeded or retries will not mitigate the issue.
*/
private Mono<HttpResponse> attemptAsync(HttpPipelineCallContext context, HttpPipelineNextPolicy next,
HttpRequest originalRequest, boolean considerSecondary, int primaryTry, int attempt,
List<Throwable> suppressed) {
final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
long delayMs = getDelayMs(primaryTry, tryingPrimary);
context.setHttpRequest(originalRequest.copy());
try {
updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
} catch (IllegalArgumentException e) {
return Mono.error(e);
}
updateRetryCountContext(context, attempt);
resetProgress(context);
Mono<HttpResponse> responseMono = next.clone().process();
if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
responseMono = responseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
}
if (delayMs > 0) {
responseMono = responseMono.delaySubscription(Duration.ofMillis(delayMs));
}
return responseMono.flatMap(response -> {
boolean newConsiderSecondary = considerSecondary;
int statusCode = response.getStatusCode();
boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
if (!tryingPrimary && statusCode == 404) {
newConsiderSecondary = false;
}
if (retry && attempt < requestRetryOptions.getMaxTries()) {
/*
* We increment primaryTry if we are about to try the primary again (which is when we consider the
* secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
* secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
* calculate the delay.
*/
int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
Flux<ByteBuffer> responseBody = response.getBody();
response.close();
if (responseBody == null) {
return attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
attempt + 1, suppressed);
} else {
return responseBody
.ignoreElements()
.then(attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
attempt + 1, suppressed));
}
}
return Mono.just(response);
}).onErrorResume(throwable -> {
/*
* It is likely that many users will not realize that their Flux must be replayable and get an error upon
* retries when the provided data length does not match the length of the exact data. We cannot enforce the
* desired Flux behavior, so we provide a hint when this is likely the root cause.
*/
if (throwable instanceof IllegalStateException && attempt > 1) {
return Mono.error(new IllegalStateException("The request failed because the size of the contents of "
+ "the provided Flux did not match the provided data size upon attempting to retry. This is likely "
+ "caused by the Flux not being replayable. To support retries, all Fluxes must produce the same "
+ "data for each subscriber. Please ensure this behavior.", throwable));
}
/*
* IOException is a catch-all for IO related errors. Technically it includes many types which may not be
* network exceptions, but we should not hit those unless there is a bug in our logic. In either case, it is
* better to optimistically retry instead of failing too soon. A Timeout Exception is a client-side timeout
* coming from Rx.
*/
ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
requestRetryOptions.getMaxTries());
if (exceptionRetryStatus.canBeRetried) {
/*
* We increment primaryTry if we are about to try the primary again (which is when we consider the
* secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
* secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
* calculate the delay.
*/
int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
return attemptAsync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
suppressedLocal);
}
if (suppressed != null) {
suppressed.forEach(throwable::addSuppressed);
}
return Mono.error(throwable);
});
}
private HttpResponse attemptSync(final HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next,
final HttpRequest originalRequest, final boolean considerSecondary, final int primaryTry, final int attempt,
final List<Throwable> suppressed) {
final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
long delayMs = getDelayMs(primaryTry, tryingPrimary);
context.setHttpRequest(originalRequest.copy());
updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
updateRetryCountContext(context, attempt);
resetProgress(context);
try {
if (delayMs > 0) {
try {
Thread.sleep(delayMs);
} catch (InterruptedException ie) {
throw LOGGER.logExceptionAsError(new RuntimeException(ie));
}
}
/*
* We want to send the request with a given timeout, but we don't want to kickoff that timeout-bound
* operation until after the retry backoff delay, so we call delaySubscription.
*/
Mono<HttpResponse> httpResponseMono = Mono.fromCallable(() -> next.clone().processSync());
if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
httpResponseMono = httpResponseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
}
HttpResponse response = httpResponseMono.block();
boolean newConsiderSecondary = considerSecondary;
int statusCode = response.getStatusCode();
boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
if (!tryingPrimary && statusCode == 404) {
newConsiderSecondary = false;
}
if (retry && attempt < requestRetryOptions.getMaxTries()) {
int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
if (response.getBody() != null) {
response.getBodyAsBinaryData().toByteBuffer();
}
response.close();
return attemptSync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
attempt + 1, suppressed);
}
return response;
} catch (RuntimeException throwable) {
/*
* It is likely that many users will not realize that their Flux must be replayable and get an error upon
* retries when the provided data length does not match the length of the exact data. We cannot enforce the
* desired Flux behavior, so we provide a hint when this is likely the root cause.
*/
if (throwable instanceof IllegalStateException && attempt > 1) {
throw LOGGER.logExceptionAsError((new IllegalStateException("The request failed because the size of the contents of "
+ "the provided data did not match the provided data size upon attempting to retry. This is likely "
+ "caused by the data not being replayable. To support retries, all Fluxes must produce the same "
+ "data for each subscriber. Please ensure this behavior.", throwable)));
}
ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
requestRetryOptions.getMaxTries());
if (exceptionRetryStatus.canBeRetried) {
/*
* We increment primaryTry if we are about to try the primary again (which is when we consider the
* secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
* secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
* calculate the delay.
*/
int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
return attemptSync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
suppressedLocal);
}
if (suppressed != null) {
suppressed.forEach(throwable::addSuppressed);
}
throw LOGGER.logExceptionAsError(throwable);
}
}
/*
* Update the RETRY_COUNT_CONTEXT to log retries.
*/
private static void updateRetryCountContext(HttpPipelineCallContext context, int attempt) {
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, attempt);
}
private static void resetProgress(HttpPipelineCallContext context) {
ProgressReporter progressReporter = Contexts.with(context.getContext()).getHttpRequestProgressReporter();
if (progressReporter != null) {
progressReporter.reset();
}
}
/*
* Update secondary host on request URL if not trying primary URL.
*/
private static void updateUrlToSecondaryHost(boolean tryingPrimary, String secondaryHost, HttpPipelineCallContext context) {
if (!tryingPrimary) {
UrlBuilder builder = UrlBuilder.parse(context.getHttpRequest().getUrl());
builder.setHost(secondaryHost);
try {
context.getHttpRequest().setUrl(builder.toUrl());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'url' must be a valid URL", e));
}
}
}
static ExceptionRetryStatus shouldErrorBeRetried(Throwable error, int attempt, int maxAttempts) {
Throwable unwrappedThrowable = Exceptions.unwrap(error);
if (attempt >= maxAttempts) {
return new ExceptionRetryStatus(false, unwrappedThrowable);
}
if (unwrappedThrowable instanceof IOException || unwrappedThrowable instanceof TimeoutException) {
return new ExceptionRetryStatus(true, unwrappedThrowable);
}
Throwable causalException = unwrappedThrowable.getCause();
while (causalException != null) {
if (causalException instanceof IOException || causalException instanceof TimeoutException) {
return new ExceptionRetryStatus(true, unwrappedThrowable);
}
causalException = causalException.getCause();
}
return new ExceptionRetryStatus(false, unwrappedThrowable);
}
static final class ExceptionRetryStatus {
final boolean canBeRetried;
final Throwable unwrappedThrowable;
ExceptionRetryStatus(boolean canBeRetried, Throwable unwrappedThrowable) {
this.canBeRetried = canBeRetried;
this.unwrappedThrowable = unwrappedThrowable;
}
}
private long getDelayMs(int primaryTry, boolean tryingPrimary) {
long delayMs;
if (tryingPrimary) {
delayMs = this.requestRetryOptions.calculateDelayInMs(primaryTry);
} else {
delayMs = (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000);
}
return delayMs;
}
private static int getNewPrimaryTry(boolean considerSecondary, int primaryTry, boolean tryingPrimary) {
/*
* We increment primaryTry if we are about to try the primary again (which is when we consider the
* secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
* secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
* calculate the delay.
*/
return (!tryingPrimary || !considerSecondary) ? primaryTry + 1 : primaryTry;
}
} |
I'd make String constants here instead of doing this numeric to String conversion, ex, just use `"429"` | static boolean shouldResponseBeRetried(int statusCode, boolean isPrimary, HttpResponse response) {
/*
* Retry the request if the server had an error (500), was unavailable (503), or requested a backoff (429),
* or if the secondary was being tried and the resources didn't exist there (404). Only the secondary can retry
* if the resource wasn't found as there may be a delay in replication from the primary.
*/
if (response != null) {
String headerValue = response.getHeaders().getValue(HttpHeaderName.fromString("x-ms-copy-source-error-code"));
if (headerValue != null) {
switch (headerValue) {
case "" + 429:
case "" + 500:
case "" + 503:
return true;
case "" + 404:
return !isPrimary;
default:
break;
}
}
}
return (statusCode == 429 || statusCode == 500 || statusCode == 503)
|| (!isPrimary && statusCode == 404);
} | case "" + 429: | static boolean shouldResponseBeRetried(int statusCode, boolean isPrimary, HttpResponse response) {
/*
* Retry the request if the server had an error (500), was unavailable (503), or requested a backoff (429),
* or if the secondary was being tried and the resources didn't exist there (404). Only the secondary can retry
* if the resource wasn't found as there may be a delay in replication from the primary.
*/
boolean headerRetry = false;
boolean statusCodeRetry = (statusCode == 429 || statusCode == 500 || statusCode == 503) || (!isPrimary && statusCode == 404);
if (response != null && response.getHeaders() != null) {
String headerValue = response.getHeaders().getValue(X_MS_COPY_SOURCE_ERROR_CODE);
if (headerValue != null) {
headerRetry = ("429".equals(headerValue) || "500".equals(headerValue) || "503".equals(headerValue))
|| (!isPrimary && "404".equals(headerValue));
}
}
return statusCodeRetry || headerRetry;
} | class RequestRetryPolicy implements HttpPipelinePolicy {
private static final ClientLogger LOGGER = new ClientLogger(RequestRetryPolicy.class);
private final RequestRetryOptions requestRetryOptions;
/**
* Constructs the policy using the retry options.
*
* @param requestRetryOptions Retry options for the policy.
*/
public RequestRetryPolicy(RequestRetryOptions requestRetryOptions) {
this.requestRetryOptions = requestRetryOptions;
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
boolean considerSecondary = (this.requestRetryOptions.getSecondaryHost() != null)
&& (HttpMethod.GET.equals(context.getHttpRequest().getHttpMethod())
|| HttpMethod.HEAD.equals(context.getHttpRequest().getHttpMethod()));
HttpRequest originalHttpRequest = context.getHttpRequest();
BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
if (requestRetryOptions.getMaxTries() > 1 && originalRequestBody != null
&& !originalRequestBody.isReplayable()) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
return this.attemptSync(context, next, originalHttpRequest, considerSecondary, 1, 1, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
boolean considerSecondary = (this.requestRetryOptions.getSecondaryHost() != null)
&& (HttpMethod.GET.equals(context.getHttpRequest().getHttpMethod())
|| HttpMethod.HEAD.equals(context.getHttpRequest().getHttpMethod()));
HttpRequest originalHttpRequest = context.getHttpRequest();
BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
if (requestRetryOptions.getMaxTries() > 1 && originalRequestBody != null
&& !originalRequestBody.isReplayable()) {
Flux<ByteBuffer> bufferedBody = context.getHttpRequest().getBody().map(ByteBuffer::duplicate);
context.getHttpRequest().setBody(bufferedBody);
}
return this.attemptAsync(context, next, context.getHttpRequest(), considerSecondary, 1, 1, null);
}
/**
* This method actually attempts to send the request and determines if we should attempt again and, if so, how long
* to wait before sending out the next request.
* <p>
* Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure or
* an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against primary;
* even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) If
* secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying against a
* secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
*
* @param context The request to try.
* @param next The next policy to apply to the request.
* @param originalRequest The unmodified original request.
* @param considerSecondary Before each try, we'll select either the primary or secondary URL if appropriate.
* @param primaryTry Number of attempts against the primary DC.
* @param attempt This indicates the total number of attempts to send the request.
* @param suppressed The list of throwables that has been suppressed.
* @return A single containing either the successful response or an error that was not retryable because either the
* {@code maxTries} was exceeded or retries will not mitigate the issue.
*/
private Mono<HttpResponse> attemptAsync(HttpPipelineCallContext context, HttpPipelineNextPolicy next,
HttpRequest originalRequest, boolean considerSecondary, int primaryTry, int attempt,
List<Throwable> suppressed) {
final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
long delayMs = getDelayMs(primaryTry, tryingPrimary);
context.setHttpRequest(originalRequest.copy());
try {
updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
} catch (IllegalArgumentException e) {
return Mono.error(e);
}
updateRetryCountContext(context, attempt);
resetProgress(context);
Mono<HttpResponse> responseMono = next.clone().process();
if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
responseMono = responseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
}
if (delayMs > 0) {
responseMono = responseMono.delaySubscription(Duration.ofMillis(delayMs));
}
return responseMono.flatMap(response -> {
boolean newConsiderSecondary = considerSecondary;
int statusCode = response.getStatusCode();
boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
if (!tryingPrimary && statusCode == 404) {
newConsiderSecondary = false;
}
if (retry && attempt < requestRetryOptions.getMaxTries()) {
/*
* We increment primaryTry if we are about to try the primary again (which is when we consider the
* secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
* secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
* calculate the delay.
*/
int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
Flux<ByteBuffer> responseBody = response.getBody();
response.close();
if (responseBody == null) {
return attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
attempt + 1, suppressed);
} else {
return responseBody
.ignoreElements()
.then(attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
attempt + 1, suppressed));
}
}
return Mono.just(response);
}).onErrorResume(throwable -> {
/*
* It is likely that many users will not realize that their Flux must be replayable and get an error upon
* retries when the provided data length does not match the length of the exact data. We cannot enforce the
* desired Flux behavior, so we provide a hint when this is likely the root cause.
*/
if (throwable instanceof IllegalStateException && attempt > 1) {
return Mono.error(new IllegalStateException("The request failed because the size of the contents of "
+ "the provided Flux did not match the provided data size upon attempting to retry. This is likely "
+ "caused by the Flux not being replayable. To support retries, all Fluxes must produce the same "
+ "data for each subscriber. Please ensure this behavior.", throwable));
}
/*
* IOException is a catch-all for IO related errors. Technically it includes many types which may not be
* network exceptions, but we should not hit those unless there is a bug in our logic. In either case, it is
* better to optimistically retry instead of failing too soon. A Timeout Exception is a client-side timeout
* coming from Rx.
*/
ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
requestRetryOptions.getMaxTries());
if (exceptionRetryStatus.canBeRetried) {
/*
* We increment primaryTry if we are about to try the primary again (which is when we consider the
* secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
* secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
* calculate the delay.
*/
int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
return attemptAsync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
suppressedLocal);
}
if (suppressed != null) {
suppressed.forEach(throwable::addSuppressed);
}
return Mono.error(throwable);
});
}
private HttpResponse attemptSync(final HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next,
final HttpRequest originalRequest, final boolean considerSecondary, final int primaryTry, final int attempt,
final List<Throwable> suppressed) {
final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
long delayMs = getDelayMs(primaryTry, tryingPrimary);
context.setHttpRequest(originalRequest.copy());
updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
updateRetryCountContext(context, attempt);
resetProgress(context);
try {
if (delayMs > 0) {
try {
Thread.sleep(delayMs);
} catch (InterruptedException ie) {
throw LOGGER.logExceptionAsError(new RuntimeException(ie));
}
}
/*
* We want to send the request with a given timeout, but we don't want to kickoff that timeout-bound
* operation until after the retry backoff delay, so we call delaySubscription.
*/
Mono<HttpResponse> httpResponseMono = Mono.fromCallable(() -> next.clone().processSync());
if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
httpResponseMono = httpResponseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
}
HttpResponse response = httpResponseMono.block();
boolean newConsiderSecondary = considerSecondary;
int statusCode = response.getStatusCode();
boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
if (!tryingPrimary && statusCode == 404) {
newConsiderSecondary = false;
}
if (retry && attempt < requestRetryOptions.getMaxTries()) {
int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
if (response.getBody() != null) {
response.getBodyAsBinaryData().toByteBuffer();
}
response.close();
return attemptSync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
attempt + 1, suppressed);
}
return response;
} catch (RuntimeException throwable) {
/*
* It is likely that many users will not realize that their Flux must be replayable and get an error upon
* retries when the provided data length does not match the length of the exact data. We cannot enforce the
* desired Flux behavior, so we provide a hint when this is likely the root cause.
*/
if (throwable instanceof IllegalStateException && attempt > 1) {
throw LOGGER.logExceptionAsError((new IllegalStateException("The request failed because the size of the contents of "
+ "the provided data did not match the provided data size upon attempting to retry. This is likely "
+ "caused by the data not being replayable. To support retries, all Fluxes must produce the same "
+ "data for each subscriber. Please ensure this behavior.", throwable)));
}
ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
requestRetryOptions.getMaxTries());
if (exceptionRetryStatus.canBeRetried) {
/*
* We increment primaryTry if we are about to try the primary again (which is when we consider the
* secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
* secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
* calculate the delay.
*/
int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
return attemptSync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
suppressedLocal);
}
if (suppressed != null) {
suppressed.forEach(throwable::addSuppressed);
}
throw LOGGER.logExceptionAsError(throwable);
}
}
/*
* Update the RETRY_COUNT_CONTEXT to log retries.
*/
private static void updateRetryCountContext(HttpPipelineCallContext context, int attempt) {
context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, attempt);
}
private static void resetProgress(HttpPipelineCallContext context) {
ProgressReporter progressReporter = Contexts.with(context.getContext()).getHttpRequestProgressReporter();
if (progressReporter != null) {
progressReporter.reset();
}
}
/*
* Update secondary host on request URL if not trying primary URL.
*/
private static void updateUrlToSecondaryHost(boolean tryingPrimary, String secondaryHost, HttpPipelineCallContext context) {
if (!tryingPrimary) {
UrlBuilder builder = UrlBuilder.parse(context.getHttpRequest().getUrl());
builder.setHost(secondaryHost);
try {
context.getHttpRequest().setUrl(builder.toUrl());
} catch (MalformedURLException e) {
throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'url' must be a valid URL", e));
}
}
}
static ExceptionRetryStatus shouldErrorBeRetried(Throwable error, int attempt, int maxAttempts) {
Throwable unwrappedThrowable = Exceptions.unwrap(error);
if (attempt >= maxAttempts) {
return new ExceptionRetryStatus(false, unwrappedThrowable);
}
if (unwrappedThrowable instanceof IOException || unwrappedThrowable instanceof TimeoutException) {
return new ExceptionRetryStatus(true, unwrappedThrowable);
}
Throwable causalException = unwrappedThrowable.getCause();
while (causalException != null) {
if (causalException instanceof IOException || causalException instanceof TimeoutException) {
return new ExceptionRetryStatus(true, unwrappedThrowable);
}
causalException = causalException.getCause();
}
return new ExceptionRetryStatus(false, unwrappedThrowable);
}
static final class ExceptionRetryStatus {
final boolean canBeRetried;
final Throwable unwrappedThrowable;
ExceptionRetryStatus(boolean canBeRetried, Throwable unwrappedThrowable) {
this.canBeRetried = canBeRetried;
this.unwrappedThrowable = unwrappedThrowable;
}
}
private long getDelayMs(int primaryTry, boolean tryingPrimary) {
long delayMs;
if (tryingPrimary) {
delayMs = this.requestRetryOptions.calculateDelayInMs(primaryTry);
} else {
delayMs = (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000);
}
return delayMs;
}
private static int getNewPrimaryTry(boolean considerSecondary, int primaryTry, boolean tryingPrimary) {
/*
* We increment primaryTry if we are about to try the primary again (which is when we consider the
* secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
* secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
* calculate the delay.
*/
return (!tryingPrimary || !considerSecondary) ? primaryTry + 1 : primaryTry;
}
} | class RequestRetryPolicy implements HttpPipelinePolicy {
private static final ClientLogger LOGGER = new ClientLogger(RequestRetryPolicy.class);
private final RequestRetryOptions requestRetryOptions;
private static final HttpHeaderName X_MS_COPY_SOURCE_ERROR_CODE = HttpHeaderName.fromString("x-ms-copy-source-error-code");
/**
* Constructs the policy using the retry options.
*
* @param requestRetryOptions Retry options for the policy.
*/
public RequestRetryPolicy(RequestRetryOptions requestRetryOptions) {
this.requestRetryOptions = requestRetryOptions;
}
@Override
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
boolean considerSecondary = (this.requestRetryOptions.getSecondaryHost() != null)
&& (HttpMethod.GET.equals(context.getHttpRequest().getHttpMethod())
|| HttpMethod.HEAD.equals(context.getHttpRequest().getHttpMethod()));
HttpRequest originalHttpRequest = context.getHttpRequest();
BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
if (requestRetryOptions.getMaxTries() > 1 && originalRequestBody != null
&& !originalRequestBody.isReplayable()) {
context.getHttpRequest().setBody(context.getHttpRequest().getBodyAsBinaryData().toReplayableBinaryData());
}
return this.attemptSync(context, next, originalHttpRequest, considerSecondary, 1, 1, null);
}
@Override
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Only idempotent read operations (GET/HEAD) may be redirected to the secondary host.
    boolean considerSecondary = (this.requestRetryOptions.getSecondaryHost() != null)
        && (HttpMethod.GET.equals(context.getHttpRequest().getHttpMethod())
            || HttpMethod.HEAD.equals(context.getHttpRequest().getHttpMethod()));
    HttpRequest originalHttpRequest = context.getHttpRequest();
    BinaryData originalRequestBody = originalHttpRequest.getBodyAsBinaryData();
    if (requestRetryOptions.getMaxTries() > 1 && originalRequestBody != null
        && !originalRequestBody.isReplayable()) {
        // NOTE(review): duplicating each ByteBuffer protects against position mutation, but
        // the underlying Flux is still the original source — whether this truly makes the
        // body replayable across attempts depends on that source. Confirm against callers.
        Flux<ByteBuffer> bufferedBody = context.getHttpRequest().getBody().map(ByteBuffer::duplicate);
        context.getHttpRequest().setBody(bufferedBody);
    }
    // Start with attempt 1 against the primary, no suppressed errors yet.
    return this.attemptAsync(context, next, context.getHttpRequest(), considerSecondary, 1, 1, null);
}
/**
* This method actually attempts to send the request and determines if we should attempt again and, if so, how long
* to wait before sending out the next request.
* <p>
* Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) When to retry: connection failure or
* an HTTP status code of 500 or greater, except 501 and 505 If using a secondary: Odd tries go against primary;
* even tries go against the secondary For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) If
* secondary gets a 404, don't fail, retry but future retries are only against the primary When retrying against a
* secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
*
* @param context The request to try.
* @param next The next policy to apply to the request.
* @param originalRequest The unmodified original request.
* @param considerSecondary Before each try, we'll select either the primary or secondary URL if appropriate.
* @param primaryTry Number of attempts against the primary DC.
* @param attempt This indicates the total number of attempts to send the request.
* @param suppressed The list of throwables that has been suppressed.
* @return A single containing either the successful response or an error that was not retryable because either the
* {@code maxTries} was exceeded or retries will not mitigate the issue.
*/
private Mono<HttpResponse> attemptAsync(HttpPipelineCallContext context, HttpPipelineNextPolicy next,
    HttpRequest originalRequest, boolean considerSecondary, int primaryTry, int attempt,
    List<Throwable> suppressed) {
    // Odd-numbered attempts (and every attempt once the secondary is out of rotation) target
    // the primary endpoint.
    final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
    long delayMs = getDelayMs(primaryTry, tryingPrimary);
    // Each attempt works on a fresh copy of the original request so mutations made by a
    // failed attempt (URL rewrite, consumed body) do not leak into the next attempt.
    context.setHttpRequest(originalRequest.copy());
    try {
        updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
    } catch (IllegalArgumentException e) {
        // An unparseable URL is not retryable; surface it through the reactive error channel.
        return Mono.error(e);
    }
    updateRetryCountContext(context, attempt);
    resetProgress(context);
    Mono<HttpResponse> responseMono = next.clone().process();
    // Integer.MAX_VALUE seconds is the sentinel for "no per-try timeout".
    if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
        responseMono = responseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
    }
    // Back-off delay is applied before subscription so the per-try timeout above does not
    // start ticking until the request is actually sent.
    if (delayMs > 0) {
        responseMono = responseMono.delaySubscription(Duration.ofMillis(delayMs));
    }
    return responseMono.flatMap(response -> {
        boolean newConsiderSecondary = considerSecondary;
        int statusCode = response.getStatusCode();
        boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
        // A 404 from the secondary means replication hasn't caught up yet; keep retrying,
        // but only against the primary from now on.
        if (!tryingPrimary && statusCode == 404) {
            newConsiderSecondary = false;
        }
        if (retry && attempt < requestRetryOptions.getMaxTries()) {
            /*
             * We increment primaryTry if we are about to try the primary again (which is when we consider the
             * secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
             * secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
             * calculate the delay.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Drain the response body before retrying, presumably so the connection can be
            // reused rather than closed — TODO confirm against the HTTP client in use.
            Flux<ByteBuffer> responseBody = response.getBody();
            response.close();
            if (responseBody == null) {
                return attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                    attempt + 1, suppressed);
            } else {
                return responseBody
                    .ignoreElements()
                    .then(attemptAsync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                        attempt + 1, suppressed));
            }
        }
        return Mono.just(response);
    }).onErrorResume(throwable -> {
        /*
         * It is likely that many users will not realize that their Flux must be replayable and get an error upon
         * retries when the provided data length does not match the length of the exact data. We cannot enforce the
         * desired Flux behavior, so we provide a hint when this is likely the root cause.
         */
        if (throwable instanceof IllegalStateException && attempt > 1) {
            return Mono.error(new IllegalStateException("The request failed because the size of the contents of "
                + "the provided Flux did not match the provided data size upon attempting to retry. This is likely "
                + "caused by the Flux not being replayable. To support retries, all Fluxes must produce the same "
                + "data for each subscriber. Please ensure this behavior.", throwable));
        }
        /*
         * IOException is a catch-all for IO related errors. Technically it includes many types which may not be
         * network exceptions, but we should not hit those unless there is a bug in our logic. In either case, it is
         * better to optimistically retry instead of failing too soon. A Timeout Exception is a client-side timeout
         * coming from Rx.
         */
        ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
            requestRetryOptions.getMaxTries());
        if (exceptionRetryStatus.canBeRetried) {
            /*
             * We increment primaryTry if we are about to try the primary again (which is when we consider the
             * secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
             * secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
             * calculate the delay.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Record the retried failure so it can be attached as suppressed to the terminal error.
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
            return attemptAsync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
                suppressedLocal);
        }
        // Out of retries (or the error is not retryable): attach the history and fail.
        if (suppressed != null) {
            suppressed.forEach(throwable::addSuppressed);
        }
        return Mono.error(throwable);
    });
}
private HttpResponse attemptSync(final HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next,
    final HttpRequest originalRequest, final boolean considerSecondary, final int primaryTry, final int attempt,
    final List<Throwable> suppressed) {
    // Odd-numbered attempts (and every attempt once the secondary is out of rotation) target
    // the primary endpoint.
    final boolean tryingPrimary = !considerSecondary || (attempt % 2 != 0);
    long delayMs = getDelayMs(primaryTry, tryingPrimary);
    // Each attempt works on a fresh copy of the original request so mutations made by a
    // failed attempt do not leak into the next attempt.
    context.setHttpRequest(originalRequest.copy());
    updateUrlToSecondaryHost(tryingPrimary, this.requestRetryOptions.getSecondaryHost(), context);
    updateRetryCountContext(context, attempt);
    resetProgress(context);
    try {
        // Synchronous back-off: sleep on the calling thread before resending.
        if (delayMs > 0) {
            try {
                Thread.sleep(delayMs);
            } catch (InterruptedException ie) {
                // NOTE(review): the interrupt flag is not restored (Thread.currentThread().interrupt())
                // before rethrowing — confirm whether that is intentional.
                throw LOGGER.logExceptionAsError(new RuntimeException(ie));
            }
        }
        /*
         * We want to send the request with a given timeout, but we don't want to kickoff that timeout-bound
         * operation until after the retry backoff delay, so we call delaySubscription.
         */
        Mono<HttpResponse> httpResponseMono = Mono.fromCallable(() -> next.clone().processSync());
        // Integer.MAX_VALUE seconds is the sentinel for "no per-try timeout".
        if (this.requestRetryOptions.getTryTimeoutDuration().getSeconds() != Integer.MAX_VALUE) {
            httpResponseMono = httpResponseMono.timeout(this.requestRetryOptions.getTryTimeoutDuration());
        }
        HttpResponse response = httpResponseMono.block();
        boolean newConsiderSecondary = considerSecondary;
        int statusCode = response.getStatusCode();
        boolean retry = shouldResponseBeRetried(statusCode, tryingPrimary, response);
        // A 404 from the secondary means replication hasn't caught up yet; keep retrying,
        // but only against the primary from now on.
        if (!tryingPrimary && statusCode == 404) {
            newConsiderSecondary = false;
        }
        if (retry && attempt < requestRetryOptions.getMaxTries()) {
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Fully read the body before closing, presumably to allow connection reuse —
            // TODO confirm against the HTTP client in use.
            if (response.getBody() != null) {
                response.getBodyAsBinaryData().toByteBuffer();
            }
            response.close();
            return attemptSync(context, next, originalRequest, newConsiderSecondary, newPrimaryTry,
                attempt + 1, suppressed);
        }
        return response;
    } catch (RuntimeException throwable) {
        /*
         * It is likely that many users will not realize that their Flux must be replayable and get an error upon
         * retries when the provided data length does not match the length of the exact data. We cannot enforce the
         * desired Flux behavior, so we provide a hint when this is likely the root cause.
         */
        if (throwable instanceof IllegalStateException && attempt > 1) {
            throw LOGGER.logExceptionAsError((new IllegalStateException("The request failed because the size of the contents of "
                + "the provided data did not match the provided data size upon attempting to retry. This is likely "
                + "caused by the data not being replayable. To support retries, all Fluxes must produce the same "
                + "data for each subscriber. Please ensure this behavior.", throwable)));
        }
        ExceptionRetryStatus exceptionRetryStatus = shouldErrorBeRetried(throwable, attempt,
            requestRetryOptions.getMaxTries());
        if (exceptionRetryStatus.canBeRetried) {
            /*
             * We increment primaryTry if we are about to try the primary again (which is when we consider the
             * secondary and tried the secondary this time (tryingPrimary==false) or we do not consider the
             * secondary at all (considerSecondary==false)). This will ensure primaryTry is correct when passed to
             * calculate the delay.
             */
            int newPrimaryTry = getNewPrimaryTry(considerSecondary, primaryTry, tryingPrimary);
            // Record the retried failure so it can be attached as suppressed to the terminal error.
            List<Throwable> suppressedLocal = suppressed == null ? new LinkedList<>() : suppressed;
            suppressedLocal.add(exceptionRetryStatus.unwrappedThrowable);
            return attemptSync(context, next, originalRequest, considerSecondary, newPrimaryTry, attempt + 1,
                suppressedLocal);
        }
        // Out of retries (or the error is not retryable): attach the history and fail.
        if (suppressed != null) {
            suppressed.forEach(throwable::addSuppressed);
        }
        throw LOGGER.logExceptionAsError(throwable);
    }
}
/*
 * Update the RETRY_COUNT_CONTEXT to log retries.
 */
private static void updateRetryCountContext(HttpPipelineCallContext context, int attempt) {
    // Stored under HttpLoggingPolicy's key so request logs can report the attempt number.
    context.setData(HttpLoggingPolicy.RETRY_COUNT_CONTEXT, attempt);
}
private static void resetProgress(HttpPipelineCallContext context) {
    // Rewind any attached progress reporting so a retried attempt starts counting from zero.
    ProgressReporter reporter = Contexts.with(context.getContext()).getHttpRequestProgressReporter();
    if (reporter == null) {
        return;
    }
    reporter.reset();
}
/*
 * Update secondary host on request URL if not trying primary URL.
 */
private static void updateUrlToSecondaryHost(boolean tryingPrimary, String secondaryHost, HttpPipelineCallContext context) {
    // Nothing to rewrite when this attempt targets the primary endpoint.
    if (tryingPrimary) {
        return;
    }
    // Swap only the host portion; path, query and scheme are preserved.
    UrlBuilder secondaryUrl = UrlBuilder.parse(context.getHttpRequest().getUrl());
    secondaryUrl.setHost(secondaryHost);
    try {
        context.getHttpRequest().setUrl(secondaryUrl.toUrl());
    } catch (MalformedURLException e) {
        throw LOGGER.logExceptionAsWarning(new IllegalArgumentException("'url' must be a valid URL", e));
    }
}
/*
 * Decides whether a failed attempt may be retried: only IOException and TimeoutException
 * (anywhere in the cause chain) are considered transient, and only while attempts remain.
 */
static ExceptionRetryStatus shouldErrorBeRetried(Throwable error, int attempt, int maxAttempts) {
    Throwable unwrapped = Exceptions.unwrap(error);
    if (attempt < maxAttempts) {
        // Walk the error itself and its full cause chain looking for a transient failure.
        for (Throwable current = unwrapped; current != null; current = current.getCause()) {
            if (current instanceof IOException || current instanceof TimeoutException) {
                return new ExceptionRetryStatus(true, unwrapped);
            }
        }
    }
    return new ExceptionRetryStatus(false, unwrapped);
}
/**
 * Immutable pairing of a retry decision with the unwrapped throwable that produced it.
 */
static final class ExceptionRetryStatus {
    // Whether the error was judged transient (IO/timeout) with attempts remaining.
    final boolean canBeRetried;
    // The original error after Exceptions.unwrap removed reactive wrapping.
    final Throwable unwrappedThrowable;
    ExceptionRetryStatus(boolean canBeRetried, Throwable unwrappedThrowable) {
        this.canBeRetried = canBeRetried;
        this.unwrappedThrowable = unwrappedThrowable;
    }
}
private long getDelayMs(int primaryTry, boolean tryingPrimary) {
    // Primary attempts follow the configured back-off schedule; secondary attempts always
    // wait roughly one second (1000 ms scaled by a random factor in [0.8, 1.3)).
    return tryingPrimary
        ? this.requestRetryOptions.calculateDelayInMs(primaryTry)
        : (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000);
}
private static int getNewPrimaryTry(boolean considerSecondary, int primaryTry, boolean tryingPrimary) {
    // Advance the primary-try counter unless this attempt hit the primary while the
    // secondary is still in rotation (in which case the next attempt targets the secondary,
    // so the primary counter must not move). Keeps the exponential back-off delay correct.
    boolean nextAttemptTargetsSecondary = tryingPrimary && considerSecondary;
    return nextAttemptTargetsSecondary ? primaryTry : primaryTry + 1;
}
} |
Does [this comment ](https://github.com/Azure/azure-sdk-for-java/pull/39093#discussion_r1535967042) apply here? | private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
} | return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 | private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
private void logHeaders(ClientLogger logger, Response<?> response,
    ClientLogger.LoggingEventBuilder logBuilder) {
    // Headers are emitted only when the detail level asks for them AND the logger will
    // actually emit at INFORMATIONAL.
    boolean headersEnabled = httpLogDetailLevel.shouldLogHeaders()
        && logger.canLogAtLevel(ClientLogger.LogLevel.INFORMATIONAL);
    if (headersEnabled) {
        addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
    }
}
private void logUrl(Response<?> response, Duration duration, ClientLogger.LoggingEventBuilder logBuilder) {
    // URL logging also carries the status code and the call duration in milliseconds.
    if (!httpLogDetailLevel.shouldLogUrl()) {
        return;
    }
    logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
        .addKeyValue(LoggingKeys.URL_KEY,
            getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
        .addKeyValue(LoggingKeys.DURATION_MS_KEY, duration.toMillis());
}
private void logContentLength(Response<?> response, ClientLogger.LoggingEventBuilder logBuilder) {
    // Content-Length is attached only when the service actually returned the header.
    String contentLength = response.getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(contentLength)) {
        return;
    }
    logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength);
}
@Override
public Response<?> logResponse(ClientLogger logger, Response<?> response, Duration duration) {
    // Resolve the level for this response and bail out early if it won't be emitted.
    final ClientLogger.LogLevel logLevel = getLogLevel(response);
    if (!logger.canLogAtLevel(logLevel)) {
        return response;
    }
    ClientLogger.LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
    logContentLength(response, logBuilder);
    logUrl(response, duration, logBuilder);
    logHeaders(logger, response, logBuilder);
    if (httpLogDetailLevel.shouldLogBody()) {
        String contentTypeHeader = response.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, response.getHeaders());
        if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
            // Body logging defers the log call: the wrapper emits once the body is consumed.
            return new LoggingHttpResponse<>(response, logBuilder);
        }
    }
    // No body logging requested/possible: emit the accumulated key-values immediately.
    logBuilder.log(RESPONSE_LOG_MESSAGE);
    return response;
}
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
private void logHeaders(ClientLogger logger, Response<?> response,
    ClientLogger.LoggingEventBuilder logBuilder) {
    // Header logging requires both the detail-level opt-in and INFO-level logging enabled.
    if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(ClientLogger.LogLevel.INFORMATIONAL)) {
        addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
    }
}
private void logUrl(Response<?> response, Duration duration, ClientLogger.LoggingEventBuilder logBuilder) {
    // Emits status code, redacted URL (only allow-listed query params kept) and call duration.
    if (httpLogDetailLevel.shouldLogUrl()) {
        logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
            .addKeyValue(LoggingKeys.URL_KEY,
                getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
            .addKeyValue(LoggingKeys.DURATION_MS_KEY, duration.toMillis());
    }
}
private void logContentLength(Response<?> response, ClientLogger.LoggingEventBuilder logBuilder) {
    // Only logged when the service returned a non-empty Content-Length header.
    String contentLengthString = response.getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
    if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
        logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
    }
}
@Override
public Response<?> logResponse(ClientLogger logger, Response<?> response, Duration duration) {
    // Skip all work when the resolved level will not be emitted by this logger.
    final ClientLogger.LogLevel logLevel = getLogLevel(response);
    if (!logger.canLogAtLevel(logLevel)) {
        return response;
    }
    ClientLogger.LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
    logContentLength(response, logBuilder);
    logUrl(response, duration, logBuilder);
    logHeaders(logger, response, logBuilder);
    if (httpLogDetailLevel.shouldLogBody()) {
        String contentTypeHeader = response.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, response.getHeaders());
        if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
            // Wrapper defers the actual log emission until the body has been read.
            return new LoggingHttpResponse<>(response, logBuilder);
        }
    }
    logBuilder.log(RESPONSE_LOG_MESSAGE);
    return response;
}
} |
It does, mind filing an issue for it as this PR has a lot of merge conflict spots | private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
} | return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 | private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) {
return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0
&& contentLength < MAX_BODY_LOG_SIZE;
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
private void logHeaders(ClientLogger logger, Response<?> response,
    ClientLogger.LoggingEventBuilder logBuilder) {
    // Header logging requires both the detail-level opt-in and INFO-level logging enabled.
    if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(ClientLogger.LogLevel.INFORMATIONAL)) {
        addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
    }
}
private void logUrl(Response<?> response, Duration duration, ClientLogger.LoggingEventBuilder logBuilder) {
    // Emits status code, redacted URL (only allow-listed query params kept) and call duration.
    if (httpLogDetailLevel.shouldLogUrl()) {
        logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
            .addKeyValue(LoggingKeys.URL_KEY,
                getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
            .addKeyValue(LoggingKeys.DURATION_MS_KEY, duration.toMillis());
    }
}
private void logContentLength(Response<?> response, ClientLogger.LoggingEventBuilder logBuilder) {
    // Only logged when the service returned a non-empty Content-Length header.
    String contentLengthString = response.getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
    if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
        logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
    }
}
@Override
public Response<?> logResponse(ClientLogger logger, Response<?> response, Duration duration) {
    // Skip all work when the resolved level will not be emitted by this logger.
    final ClientLogger.LogLevel logLevel = getLogLevel(response);
    if (!logger.canLogAtLevel(logLevel)) {
        return response;
    }
    ClientLogger.LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
    logContentLength(response, logBuilder);
    logUrl(response, duration, logBuilder);
    logHeaders(logger, response, logBuilder);
    if (httpLogDetailLevel.shouldLogBody()) {
        String contentTypeHeader = response.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, response.getHeaders());
        if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
            // Wrapper defers the actual log emission until the body has been read.
            return new LoggingHttpResponse<>(response, logBuilder);
        }
    }
    logBuilder.log(RESPONSE_LOG_MESSAGE);
    return response;
}
} | class DefaultHttpResponseLogger implements HttpResponseLogger {
private void logHeaders(ClientLogger logger, Response<?> response,
    ClientLogger.LoggingEventBuilder logBuilder) {
    // Header logging requires both the detail-level opt-in and INFO-level logging enabled.
    if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(ClientLogger.LogLevel.INFORMATIONAL)) {
        addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
    }
}
private void logUrl(Response<?> response, Duration duration, ClientLogger.LoggingEventBuilder logBuilder) {
    // Emits status code, redacted URL (only allow-listed query params kept) and call duration.
    if (httpLogDetailLevel.shouldLogUrl()) {
        logBuilder.addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
            .addKeyValue(LoggingKeys.URL_KEY,
                getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
            .addKeyValue(LoggingKeys.DURATION_MS_KEY, duration.toMillis());
    }
}
private void logContentLength(Response<?> response, ClientLogger.LoggingEventBuilder logBuilder) {
    // Only logged when the service returned a non-empty Content-Length header.
    String contentLengthString = response.getHeaders().getValue(HttpHeaderName.CONTENT_LENGTH);
    if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
        logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
    }
}
@Override
public Response<?> logResponse(ClientLogger logger, Response<?> response, Duration duration) {
    // Skip all work when the resolved level will not be emitted by this logger.
    final ClientLogger.LogLevel logLevel = getLogLevel(response);
    if (!logger.canLogAtLevel(logLevel)) {
        return response;
    }
    ClientLogger.LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
    logContentLength(response, logBuilder);
    logUrl(response, duration, logBuilder);
    logHeaders(logger, response, logBuilder);
    if (httpLogDetailLevel.shouldLogBody()) {
        String contentTypeHeader = response.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE);
        long contentLength = getContentLength(logger, response.getHeaders());
        if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
            // Wrapper defers the actual log emission until the body has been read.
            return new LoggingHttpResponse<>(response, logBuilder);
        }
    }
    logBuilder.log(RESPONSE_LOG_MESSAGE);
    return response;
}
} |
Since we don't care about the `ShareProperties` we should think about calling the get properties API in implementation that doesn't convert the headers to an object. This isn't something we should change in this PR, and is something that would apply to Blob and DataLake as well so we can reduce the overhead of existence checking. | public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
try {
Response<ShareProperties> response = getPropertiesWithResponse(timeout, context);
return new SimpleResponse<>(response, true);
} catch (RuntimeException e) {
if (ModelHelper.checkDoesNotExistStatusCode(e)) {
HttpResponse response = e instanceof ShareStorageException
? ((ShareStorageException) e).getResponse()
: ((HttpResponseException) e).getResponse();
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
response.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
} | Response<ShareProperties> response = getPropertiesWithResponse(timeout, context); | public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
try {
Response<ShareProperties> response = getPropertiesWithResponse(timeout, context);
return new SimpleResponse<>(response, true);
} catch (RuntimeException e) {
if (ModelHelper.checkDoesNotExistStatusCode(e) && e instanceof HttpResponseException) {
HttpResponse response = ((HttpResponseException) e).getResponse();
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
} | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
    ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
    // shareName is the only argument validated here; the rest are stored as provided.
    Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    this.shareName = shareName;
    // Non-null only when this client addresses a specific share snapshot.
    this.snapshot = snapshot;
    this.accountName = accountName;
    this.azureFileStorageClient = azureFileStorageClient;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    // Account root endpoint; does not include the share name.
    return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
    // Base endpoint plus share name; the snapshot query parameter is appended only when
    // this client targets a share snapshot.
    String url = azureFileStorageClient.getUrl() + "/" + shareName;
    if (snapshot != null) {
        url = url + "?sharesnapshot=" + snapshot;
    }
    return url;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public ShareServiceVersion getServiceVersion() {
    // Fixed at construction time.
    return this.serviceVersion;
}
/**
 * Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
 *
 * <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
 * need to be called before interaction with the directory can happen.</p>
 *
 * @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
 */
public ShareDirectoryClient getRootDirectoryClient() {
    // The root directory is addressed with an empty directory name.
    return getDirectoryClient("");
}
/**
 * Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
 *
 * <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
 * need to be called before interaction with the directory can happen.</p>
 *
 * @param directoryName Name of the directory
 * @return a {@link ShareDirectoryClient} that interacts with the directory in the share
 */
public ShareDirectoryClient getDirectoryClient(String directoryName) {
    // The directory name is passed through as-is; the new client reuses this client's
    // pipeline, share name, snapshot, service version and credentials.
    return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
        serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
private final String shareUrlString;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
    ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
    Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    this.shareName = shareName;
    this.snapshot = snapshot;
    this.accountName = accountName;
    this.azureFileStorageClient = azureFileStorageClient;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
    // Precompute the share URL once; the client is immutable so it never changes.
    String url = azureFileStorageClient.getUrl() + "/" + shareName;
    this.shareUrlString = (snapshot == null) ? url : url + "?sharesnapshot=" + snapshot;
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    // Account root endpoint; does not include the share name.
    return azureFileStorageClient.getUrl();
}
/**
 * Get the url of the storage share client.
 *
 * @return the url of the Storage Share.
 */
public String getShareUrl() {
    // Precomputed in the constructor (includes the sharesnapshot query parameter when set).
    return this.shareUrlString;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public ShareServiceVersion getServiceVersion() {
    // Fixed at construction time.
    return this.serviceVersion;
}
/**
 * Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
 *
 * <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
 * need to be called before interaction with the directory can happen.</p>
 *
 * @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
 */
public ShareDirectoryClient getRootDirectoryClient() {
    // The root directory is addressed with an empty directory name.
    return getDirectoryClient("");
}
/**
 * Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
 *
 * <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
 * need to be called before interaction with the directory can happen.</p>
 *
 * @param directoryName Name of the directory
 * @return a {@link ShareDirectoryClient} that interacts with the directory in the share
 */
public ShareDirectoryClient getDirectoryClient(String directoryName) {
    // A bare "/" refers to the share root, so normalize it to the empty string.
    directoryName = "/".equals(directoryName) ? "" : directoryName;
    return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
        serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient |
Given the `ShareClient` is immutable after creation we should just create this when constructing the class. | public String getShareUrl() {
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?sharesnapshot=").append(snapshot);
}
return shareUrlString.toString();
} | return shareUrlString.toString(); | public String getShareUrl() {
return this.shareUrlString;
} | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
    ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
    // shareName is the only argument validated here; the rest are stored as provided.
    Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    this.shareName = shareName;
    // Non-null only when this client addresses a specific share snapshot.
    this.snapshot = snapshot;
    this.accountName = accountName;
    this.azureFileStorageClient = azureFileStorageClient;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
}
/**
 * Get the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    // Account root endpoint; does not include the share name.
    return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public ShareServiceVersion getServiceVersion() {
    // Fixed at construction time.
    return this.serviceVersion;
}
/**
 * Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
 *
 * <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
 * need to be called before interaction with the directory can happen.</p>
 *
 * @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
 */
public ShareDirectoryClient getRootDirectoryClient() {
    // The root directory is addressed with an empty directory name.
    return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryClient} that interacts with the directory in the share
*/
public ShareDirectoryClient getDirectoryClient(String directoryName) {
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
private final String shareUrlString;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = azureFileStorageClient;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?sharesnapshot=").append(snapshot);
}
this.shareUrlString = shareUrlString.toString();
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
*/
public ShareDirectoryClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryClient} that interacts with the directory in the share
*/
public ShareDirectoryClient getDirectoryClient(String directoryName) {
directoryName = "/".equals(directoryName) ? "" : directoryName;
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient |
This isn't needed as `ShareStorageException` is an instance of `HttpResponseException`, so we can just use `((HttpResponseException) e).getResponse()` | public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
try {
Response<ShareProperties> response = getPropertiesWithResponse(timeout, context);
return new SimpleResponse<>(response, true);
} catch (RuntimeException e) {
if (ModelHelper.checkDoesNotExistStatusCode(e)) {
HttpResponse response = e instanceof ShareStorageException
? ((ShareStorageException) e).getResponse()
: ((HttpResponseException) e).getResponse();
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
response.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
} | : ((HttpResponseException) e).getResponse(); | public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
try {
Response<ShareProperties> response = getPropertiesWithResponse(timeout, context);
return new SimpleResponse<>(response, true);
} catch (RuntimeException e) {
if (ModelHelper.checkDoesNotExistStatusCode(e) && e instanceof HttpResponseException) {
HttpResponse response = ((HttpResponseException) e).getResponse();
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
} | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = azureFileStorageClient;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?sharesnapshot=").append(snapshot);
}
return shareUrlString.toString();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
*/
public ShareDirectoryClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryClient} that interacts with the directory in the share
*/
public ShareDirectoryClient getDirectoryClient(String directoryName) {
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
private final String shareUrlString;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = azureFileStorageClient;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?sharesnapshot=").append(snapshot);
}
this.shareUrlString = shareUrlString.toString();
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
return this.shareUrlString;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
*/
public ShareDirectoryClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryClient} that interacts with the directory in the share
*/
public ShareDirectoryClient getDirectoryClient(String directoryName) {
directoryName = "/".equals(directoryName) ? "" : directoryName;
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient |
This is something we need to fix, a 409 doesn't always mean that the share already exists. There are other reasons we could get a 409, so we should check for the specific error code indicating the share already exists. | public Response<ShareInfo> createIfNotExistsWithResponse(ShareCreateOptions options, Duration timeout, Context context) {
try {
return createWithResponse(options, timeout, context);
} catch (ShareStorageException e) {
if (e.getStatusCode() == 409) {
HttpResponse res = e.getResponse();
return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), null);
} else {
throw LOGGER.logExceptionAsError(e);
}
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
}
} | if (e.getStatusCode() == 409) { | public Response<ShareInfo> createIfNotExistsWithResponse(ShareCreateOptions options, Duration timeout, Context context) {
try {
return createWithResponse(options, timeout, context);
} catch (ShareStorageException e) {
if (e.getStatusCode() == 409 && e.getErrorCode().equals(ShareErrorCode.SHARE_ALREADY_EXISTS)) {
HttpResponse res = e.getResponse();
return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), null);
} else {
throw LOGGER.logExceptionAsError(e);
}
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
}
} | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = azureFileStorageClient;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?sharesnapshot=").append(snapshot);
}
return shareUrlString.toString();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
*/
public ShareDirectoryClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryClient} that interacts with the directory in the share
*/
public ShareDirectoryClient getDirectoryClient(String directoryName) {
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
private final String shareUrlString;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = azureFileStorageClient;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?sharesnapshot=").append(snapshot);
}
this.shareUrlString = shareUrlString.toString();
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
return this.shareUrlString;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
*/
public ShareDirectoryClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryClient} that interacts with the directory in the share
*/
public ShareDirectoryClient getDirectoryClient(String directoryName) {
directoryName = "/".equals(directoryName) ? "" : directoryName;
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient |
Similar concern where 404 could mean more than just the share doesn't exist. | public Response<Boolean> deleteIfExistsWithResponse(ShareDeleteOptions options, Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareDeleteOptions finalOptions = options == null ? new ShareDeleteOptions() : options;
ShareRequestConditions requestConditions = finalOptions.getRequestConditions() == null
? new ShareRequestConditions() : finalOptions.getRequestConditions();
try {
Supplier<ResponseBase<SharesDeleteHeaders, Void>> operation = () ->
this.azureFileStorageClient.getShares()
.deleteWithResponse(shareName, snapshot, null,
ModelHelper.toDeleteSnapshotsOptionType(finalOptions.getDeleteSnapshotsOptions()),
requestConditions.getLeaseId(), finalContext);
Response<Void> response = timeout != null ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(),
TimeUnit.MILLISECONDS) : operation.get();
return new SimpleResponse<>(response, true);
} catch (ShareStorageException e) {
if (e.getStatusCode() == 404) {
HttpResponse res = e.getResponse();
return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} | if (e.getStatusCode() == 404) { | public Response<Boolean> deleteIfExistsWithResponse(ShareDeleteOptions options, Duration timeout, Context context) {
try {
Response<Void> response = this.deleteWithResponse(options, timeout, context);
return new SimpleResponse<>(response, true);
} catch (ShareStorageException e) {
if (e.getStatusCode() == 404 && e.getErrorCode().equals(ShareErrorCode.SHARE_NOT_FOUND)) {
HttpResponse res = e.getResponse();
return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
} | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = azureFileStorageClient;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?sharesnapshot=").append(snapshot);
}
return shareUrlString.toString();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
*/
public ShareDirectoryClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryClient} that interacts with the directory in the share
*/
public ShareDirectoryClient getDirectoryClient(String directoryName) {
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient | class ShareClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
private final String shareUrlString;
/**
* Creates a ShareClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String snapshot, String accountName,
ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
this.shareName = shareName;
this.snapshot = snapshot;
this.accountName = accountName;
this.azureFileStorageClient = azureFileStorageClient;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
StringBuilder shareUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/").append(shareName);
if (snapshot != null) {
shareUrlString.append("?sharesnapshot=").append(snapshot);
}
this.shareUrlString = shareUrlString.toString();
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage share client.
*
* @return the url of the Storage Share.
*/
public String getShareUrl() {
return this.shareUrlString;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the root directory in the share.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @return a {@link ShareDirectoryClient} that interacts with the root directory in the share
*/
public ShareDirectoryClient getRootDirectoryClient() {
return getDirectoryClient("");
}
/**
* Constructs a {@link ShareDirectoryClient} that interacts with the specified directory.
*
* <p>If the directory doesn't exist in the share {@link ShareDirectoryClient
* need to be called before interaction with the directory can happen.</p>
*
* @param directoryName Name of the directory
* @return a {@link ShareDirectoryClient} that interacts with the directory in the share
*/
public ShareDirectoryClient getDirectoryClient(String directoryName) {
directoryName = "/".equals(directoryName) ? "" : directoryName;
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryName, snapshot, accountName,
serviceVersion, sasToken);
}
/**
* Constructs a {@link ShareFileClient} that interacts with the specified file.
*
* <p>If the file doesn't exist in the share {@link ShareFileClient |
Don't need a StringBuilder here as the string concatenations are simple. ```suggestion boolean needPathDelimeter = !this.directoryPath.isEmpty() && !this.directoryPath.endsWith("/"); String subDirectoryPath = this.directoryPath + (needPathDelimiter ? "/" : "") + subdirectoryName; ``` And looking at this more, this can run into issues if the subdirectoryName is passed with a leading `/`, but we can handle that in a separate change. | public ShareDirectoryClient getSubdirectoryClient(String subdirectoryName) {
StringBuilder directoryPathBuilder = new StringBuilder()
.append(this.directoryPath);
if (!this.directoryPath.isEmpty() && !this.directoryPath.endsWith("/")) {
directoryPathBuilder.append("/");
}
directoryPathBuilder.append(subdirectoryName);
return new ShareDirectoryClient(azureFileStorageClient, shareName, directoryPathBuilder.toString(), snapshot,
accountName, serviceVersion, sasToken);
} | directoryPathBuilder.append(subdirectoryName); | public ShareDirectoryClient getSubdirectoryClient(String subdirectoryName) {
boolean needPathDelimiter = !this.directoryPath.isEmpty() && !this.directoryPath.endsWith("/");
String subDirectoryPath = this.directoryPath + (needPathDelimiter ? "/" : "") + subdirectoryName;
return new ShareDirectoryClient(azureFileStorageClient, shareName, subDirectoryPath, snapshot, accountName,
serviceVersion, sasToken);
} | class ShareDirectoryClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareDirectoryClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String directoryPath;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
/**
* Creates a ShareDirectoryClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param directoryPath Name of the directory
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareDirectoryClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath,
String snapshot, String accountName, ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
Objects.requireNonNull(directoryPath);
this.shareName = shareName;
this.directoryPath = directoryPath;
this.snapshot = snapshot;
this.azureFileStorageClient = azureFileStorageClient;
this.accountName = accountName;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
}
/**
* Get the url of the storage directory client.
*
* @return the URL of the storage directory client.
*/
public String getDirectoryUrl() {
return this.azureFileStorageClient.getUrl();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Constructs a ShareFileClient that interacts with the specified file.
*
* <p>If the file doesn't exist in this directory {@link ShareFileClient | class ShareDirectoryClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareDirectoryClient.class);
// Generated service client shared by all operations.
private final AzureFileStorageImpl azureFileStorageClient;
// Coordinates of the directory this client targets.
private final String shareName;
private final String directoryPath;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
// Fully-qualified directory URL, precomputed in the constructor.
private final String directoryUrl;
/**
 * Creates a ShareDirectoryClient and precomputes the directory URL.
 *
 * @param azureFileStorageClient Client that interacts with the service interfaces
 * @param shareName Name of the share
 * @param directoryPath Path of the directory
 * @param snapshot The snapshot of the share
 * @param accountName Name of the account
 * @param serviceVersion The version of the service to be used when making requests.
 * @param sasToken The SAS token used to authenticate the request
 * @throws NullPointerException If {@code shareName} or {@code directoryPath} is {@code null}.
 */
ShareDirectoryClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath,
    String snapshot, String accountName, ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
    this.shareName = Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    // Consistency fix: add a message so the NPE names the offending argument, matching the check above.
    this.directoryPath = Objects.requireNonNull(directoryPath, "'directoryPath' cannot be null.");
    this.snapshot = snapshot;
    this.azureFileStorageClient = azureFileStorageClient;
    this.accountName = accountName;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
    // Cache the fully-qualified directory URL; the query parameter selects a share snapshot when present.
    StringBuilder directoryUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/")
        .append(shareName).append("/").append(directoryPath);
    if (snapshot != null) {
        directoryUrlString.append("?sharesnapshot=").append(snapshot);
    }
    this.directoryUrl = directoryUrlString.toString();
}
/**
 * Gets the URL of the storage directory client.
 *
 * @return the URL of the storage directory client.
 */
public String getDirectoryUrl() {
    return directoryUrl;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public ShareServiceVersion getServiceVersion() {
    return serviceVersion;
}
/**
* Constructs a ShareFileClient that interacts with the specified file.
*
* <p>If the file doesn't exist in this directory {@link ShareFileClient |
// NOTE(review): ShareStorageException is a subtype of HttpResponseException, so a single
// instanceof guard suffices — and it also prevents a ClassCastException when some other
// RuntimeException maps to a does-not-exist status code.
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    try {
        // Probe for existence by fetching properties; success means the directory exists.
        Response<ShareDirectoryProperties> response = getPropertiesWithResponse(timeout, context);
        return new SimpleResponse<>(response, true);
    } catch (RuntimeException e) {
        if (ModelHelper.checkDoesNotExistStatusCode(e) && e instanceof HttpResponseException) {
            HttpResponse response = ((HttpResponseException) e).getResponse();
            return new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                response.getHeaders(), false);
        } else {
            // Any other failure is logged and rethrown unchanged.
            throw LOGGER.logExceptionAsError(e);
        }
    }
} | : ((HttpResponseException) e).getResponse(); | public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
// Probe for existence by fetching properties; success means the directory exists.
try {
Response<ShareDirectoryProperties> response = getPropertiesWithResponse(timeout, context);
return new SimpleResponse<>(response, true);
} catch (RuntimeException e) {
// A does-not-exist status maps to a successful 'false' response; ShareStorageException is a
// subtype of HttpResponseException, so one instanceof guard covers both.
if (ModelHelper.checkDoesNotExistStatusCode(e) && e instanceof HttpResponseException) {
HttpResponse response = ((HttpResponseException) e).getResponse();
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
response.getHeaders(), false);
} else {
// Any other failure is logged and rethrown unchanged.
throw LOGGER.logExceptionAsError(e);
}
}
}
private static final ClientLogger LOGGER = new ClientLogger(ShareDirectoryClient.class);
// Generated service client shared by all operations.
private final AzureFileStorageImpl azureFileStorageClient;
// Coordinates of the directory this client targets.
private final String shareName;
private final String directoryPath;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
/**
 * Creates a ShareDirectoryClient.
 *
 * @param azureFileStorageClient Client that interacts with the service interfaces
 * @param shareName Name of the share
 * @param directoryPath Path of the directory
 * @param snapshot The snapshot of the share
 * @param accountName Name of the account
 * @param serviceVersion The version of the service to be used when making requests.
 * @param sasToken The SAS token used to authenticate the request
 * @throws NullPointerException If {@code shareName} or {@code directoryPath} is {@code null}.
 */
ShareDirectoryClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath,
    String snapshot, String accountName, ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
    this.shareName = Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    // Consistency fix: add a message so the NPE names the offending argument, matching the check above.
    this.directoryPath = Objects.requireNonNull(directoryPath, "'directoryPath' cannot be null.");
    this.snapshot = snapshot;
    this.azureFileStorageClient = azureFileStorageClient;
    this.accountName = accountName;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
}
/**
 * Gets the URL of the storage directory client.
 *
 * @return the URL of the storage directory client.
 */
public String getDirectoryUrl() {
    return azureFileStorageClient.getUrl();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public ShareServiceVersion getServiceVersion() {
    return serviceVersion;
}
/**
* Constructs a ShareFileClient that interacts with the specified file.
*
* <p>If the file doesn't exist in this directory {@link ShareFileClient | class ShareDirectoryClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareDirectoryClient.class);
// Generated service client shared by all operations.
private final AzureFileStorageImpl azureFileStorageClient;
// Coordinates of the directory this client targets.
private final String shareName;
private final String directoryPath;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
// Fully-qualified directory URL, precomputed in the constructor.
private final String directoryUrl;
/**
 * Creates a ShareDirectoryClient and precomputes the directory URL.
 *
 * @param azureFileStorageClient Client that interacts with the service interfaces
 * @param shareName Name of the share
 * @param directoryPath Path of the directory
 * @param snapshot The snapshot of the share
 * @param accountName Name of the account
 * @param serviceVersion The version of the service to be used when making requests.
 * @param sasToken The SAS token used to authenticate the request
 * @throws NullPointerException If {@code shareName} or {@code directoryPath} is {@code null}.
 */
ShareDirectoryClient(AzureFileStorageImpl azureFileStorageClient, String shareName, String directoryPath,
    String snapshot, String accountName, ShareServiceVersion serviceVersion, AzureSasCredential sasToken) {
    this.shareName = Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    // Consistency fix: add a message so the NPE names the offending argument, matching the check above.
    this.directoryPath = Objects.requireNonNull(directoryPath, "'directoryPath' cannot be null.");
    this.snapshot = snapshot;
    this.azureFileStorageClient = azureFileStorageClient;
    this.accountName = accountName;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
    // Cache the fully-qualified directory URL; the query parameter selects a share snapshot when present.
    StringBuilder directoryUrlString = new StringBuilder(azureFileStorageClient.getUrl()).append("/")
        .append(shareName).append("/").append(directoryPath);
    if (snapshot != null) {
        directoryUrlString.append("?sharesnapshot=").append(snapshot);
    }
    this.directoryUrl = directoryUrlString.toString();
}
/**
 * Gets the URL of the storage directory client.
 *
 * @return the URL of the storage directory client.
 */
public String getDirectoryUrl() {
    return directoryUrl;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public ShareServiceVersion getServiceVersion() {
    return serviceVersion;
}
/**
* Constructs a ShareFileClient that interacts with the specified file.
*
* <p>If the file doesn't exist in this directory {@link ShareFileClient |
We shouldn't mutate the client builder internally, I'd revert this back and just change how the sync and async clients are built. | private ShareServiceVersion getServiceVersion() {
if (version == null) {
version = ShareServiceVersion.getLatest();
}
return version;
} | return version; | private ShareServiceVersion getServiceVersion() {
// Returns the configured service version, or the latest known version when none was set.
return version != null ? version : ShareServiceVersion.getLatest();
} | class ShareFileClientBuilder implements
TokenCredentialTrait<ShareFileClientBuilder>,
HttpTrait<ShareFileClientBuilder>,
ConnectionStringTrait<ShareFileClientBuilder>,
AzureNamedKeyCredentialTrait<ShareFileClientBuilder>,
AzureSasCredentialTrait<ShareFileClientBuilder>,
ConfigurationTrait<ShareFileClientBuilder>,
EndpointTrait<ShareFileClientBuilder> {
private static final ClientLogger LOGGER = new ClientLogger(ShareFileClientBuilder.class);
// Target resource coordinates, typically parsed from endpoint(String) or set explicitly.
private String endpoint;
private String accountName;
private String shareName;
private String shareSnapshot;
private String resourcePath;
// At most one credential style should be configured; validated in constructImpl().
private StorageSharedKeyCredential storageSharedKeyCredential;
private AzureSasCredential azureSasCredential;
private TokenCredential tokenCredential;
private String sasToken;
// HTTP pipeline configuration; an explicit httpPipeline overrides the individual settings.
private HttpClient httpClient;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions;
private RetryOptions coreRetryOptions;
private HttpPipeline httpPipeline;
private ClientOptions clientOptions = new ClientOptions();
private Configuration configuration;
// Service behavior options.
private ShareServiceVersion version;
private ShareTokenIntent shareTokenIntent;
private boolean allowSourceTrailingDot;
private boolean allowTrailingDot;
private ShareAudience audience;
/**
 * Creates a builder that can configure and construct {@link ShareFileClient FileClients} and
 * {@link ShareFileAsyncClient FileAsyncClients}.
 */
public ShareFileClientBuilder() {
    this.logOptions = getDefaultHttpLogOptions();
}
/**
 * Builds the generated {@link AzureFileStorageImpl} shared by all client flavors.
 * Validates required share/resource state and that at most one credential style is configured.
 *
 * @return a configured {@link AzureFileStorageImpl}.
 * @throws NullPointerException If {@code shareName} or {@code resourcePath} is {@code null}.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
private AzureFileStorageImpl constructImpl() {
    Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    Objects.requireNonNull(resourcePath, "'resourcePath' cannot be null.");
    CredentialValidator.validateSingleCredentialIsPresent(
        storageSharedKeyCredential, null, azureSasCredential, sasToken, LOGGER);
    // An explicitly-provided pipeline takes precedence over the individually configured settings.
    HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline(
        storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken,
        endpoint, retryOptions, coreRetryOptions, logOptions,
        clientOptions, httpClient, perCallPolicies, perRetryPolicies, configuration, audience, LOGGER);
    // Bug fix: 'version' is null when serviceVersion(...) was never called; fall back to the
    // latest known service version instead of dereferencing it directly (NPE).
    ShareServiceVersion resolvedVersion = version != null ? version : ShareServiceVersion.getLatest();
    return new AzureFileStorageImpl(pipeline, resolvedVersion.getVersion(), shareTokenIntent, endpoint,
        allowTrailingDot, allowSourceTrailingDot);
}
/**
 * Creates a {@link ShareDirectoryAsyncClient} based on the options set in this builder. A new
 * instance is created on every invocation.
 *
 * @return A ShareDirectoryAsyncClient with the options set from the builder.
 * @throws NullPointerException If {@code shareName} or {@code resourcePath} is {@code null}.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
public ShareDirectoryAsyncClient buildDirectoryAsyncClient() {
    // Resolve the service version first, preserving the original evaluation order.
    ShareServiceVersion resolvedVersion = getServiceVersion();
    AzureSasCredential credential = sasToken != null ? new AzureSasCredential(sasToken) : azureSasCredential;
    return new ShareDirectoryAsyncClient(constructImpl(), shareName, resourcePath, shareSnapshot,
        accountName, resolvedVersion, credential);
}
/**
 * Creates a {@link ShareDirectoryClient} based on the options set in this builder. A new
 * instance is created on every invocation.
 *
 * @return A ShareDirectoryClient with the options set from the builder.
 * @throws NullPointerException If {@code shareName} or {@code resourcePath} is {@code null}.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
public ShareDirectoryClient buildDirectoryClient() {
    // Resolve the service version first, preserving the original evaluation order.
    ShareServiceVersion resolvedVersion = getServiceVersion();
    AzureSasCredential credential = sasToken != null ? new AzureSasCredential(sasToken) : azureSasCredential;
    return new ShareDirectoryClient(constructImpl(), shareName, resourcePath, shareSnapshot,
        accountName, resolvedVersion, credential);
}
/**
 * Creates a {@link ShareFileAsyncClient} based on the options set in this builder. A new
 * instance is created on every invocation.
 *
 * @return A ShareFileAsyncClient with the options set from the builder.
 * @throws NullPointerException If {@code shareName} or {@code resourcePath} is {@code null}.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
public ShareFileAsyncClient buildFileAsyncClient() {
    // Resolve the service version first, preserving the original evaluation order.
    ShareServiceVersion resolvedVersion = getServiceVersion();
    AzureSasCredential credential = sasToken != null ? new AzureSasCredential(sasToken) : azureSasCredential;
    return new ShareFileAsyncClient(constructImpl(), shareName, resourcePath, shareSnapshot,
        accountName, resolvedVersion, credential);
}
/**
 * Creates a {@link ShareFileClient} based on the options set in this builder. A new instance is
 * created on every invocation.
 *
 * @return A ShareFileClient with the options set from the builder.
 * @throws NullPointerException If {@code shareName} or {@code resourcePath} is {@code null}.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
public ShareFileClient buildFileClient() {
    ShareServiceVersion resolvedVersion = getServiceVersion();
    // Fix: the original called constructImpl() twice (and built the SAS credential twice),
    // producing two distinct AzureFileStorageImpl instances for one logical client. Build the
    // shared pieces once and reuse them for the sync client and its wrapped async client.
    AzureFileStorageImpl impl = constructImpl();
    AzureSasCredential credential = sasToken != null ? new AzureSasCredential(sasToken) : azureSasCredential;
    return new ShareFileClient(new ShareFileAsyncClient(impl, shareName, resourcePath, shareSnapshot,
        accountName, resolvedVersion, credential), impl, shareName, resourcePath, shareSnapshot,
        accountName, resolvedVersion, credential);
}
/**
 * Sets the endpoint for the Azure Storage File instance that the client will interact with.
 *
 * <p>The first path segment, if the endpoint contains path segments, is assumed to be the name
 * of the share; remaining segments form the resource path. Query parameters are parsed in an
 * attempt to extract a SAS token and, if present, a {@code sharesnapshot} identifier.</p>
 *
 * @param endpoint The URL of the Azure Storage File instance to send service requests to.
 * @return the updated ShareFileClientBuilder object
 * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is an invalid URL
 */
@Override
public ShareFileClientBuilder endpoint(String endpoint) {
try {
URL fullUrl = new URL(endpoint);
// NOTE(review): this line appears truncated by extraction (unterminated string literal);
// presumably it rebuilds "<protocol>://<host>" — confirm against the original source.
this.endpoint = fullUrl.getProtocol() + ":
this.accountName = BuilderHelper.getAccountName(fullUrl);
// pathSegments[0] is the empty string before the leading '/'; [1] is the share name.
String[] pathSegments = fullUrl.getPath().split("/");
int length = pathSegments.length;
this.shareName = length >= 2 ? pathSegments[1] : this.shareName;
// Everything after the share name is the file/directory resource path.
String[] filePathParams = length >= 3 ? Arrays.copyOfRange(pathSegments, 2, length) : null;
this.resourcePath = filePathParams != null ? String.join("/", filePathParams) : this.resourcePath;
Map<String, String[]> queryParamsMap = SasImplUtils.parseQueryString(fullUrl.getQuery());
String[] snapshotArray = queryParamsMap.remove("sharesnapshot");
if (snapshotArray != null) {
this.shareSnapshot = snapshotArray[0];
}
// Re-parse the raw query (local 'sasToken' intentionally shadows the field here).
String sasToken = new CommonSasQueryParameters(
SasImplUtils.parseQueryString(fullUrl.getQuery()), false).encode();
if (!CoreUtils.isNullOrEmpty(sasToken)) {
sasToken(sasToken);
}
} catch (MalformedURLException ex) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("The Azure Storage File endpoint url is malformed.", ex));
}
return this;
}
/**
 * Sets the share that the constructed clients will interact with.
 *
 * @param shareName Name of the share
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder shareName(String shareName) {
    this.shareName = shareName;
    return this;
}
/**
 * Sets the share snapshot that the constructed clients will interact with. The snapshot must
 * belong to the share specified in this builder.
 *
 * @param snapshot Identifier of the share snapshot
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder snapshot(String snapshot) {
    this.shareSnapshot = snapshot;
    return this;
}
/**
 * Sets the file (or directory) that the constructed clients will interact with.
 *
 * @param resourcePath Path of the file (or directory).
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder resourcePath(String resourcePath) {
    this.resourcePath = resourcePath;
    return this;
}
/**
 * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
 * Clears any previously configured token credential or SAS token.
 *
 * @param credential {@link StorageSharedKeyCredential}.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public ShareFileClientBuilder credential(StorageSharedKeyCredential credential) {
    this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Only one credential style may be active at a time.
    this.tokenCredential = null;
    this.sasToken = null;
    return this;
}
/**
 * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
 * Delegates to the {@link StorageSharedKeyCredential} overload.
 *
 * @param credential {@link AzureNamedKeyCredential}.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public ShareFileClientBuilder credential(AzureNamedKeyCredential credential) {
    return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(
        Objects.requireNonNull(credential, "'credential' cannot be null.")));
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Clears any
 * previously configured shared key credential or SAS token. Refer to the Azure SDK for Java
 * identity documentation for proper usage of {@link TokenCredential}.
 *
 * @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code tokenCredential} is {@code null}.
 */
@Override
public ShareFileClientBuilder credential(TokenCredential tokenCredential) {
    this.tokenCredential = Objects.requireNonNull(tokenCredential, "'credential' cannot be null.");
    // Only one credential style may be active at a time.
    this.sasToken = null;
    this.storageSharedKeyCredential = null;
    return this;
}
/**
 * Sets the SAS token used to authorize requests sent to the service. Clears any previously
 * configured shared key or token credential.
 *
 * @param sasToken The SAS token to use for authenticating requests. This string should only be
 * the query parameters (with or without a leading '?') and not a full url.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code sasToken} is {@code null}.
 */
public ShareFileClientBuilder sasToken(String sasToken) {
    this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
    // Only one credential style may be active at a time.
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    return this;
}
/**
 * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public ShareFileClientBuilder credential(AzureSasCredential credential) {
    this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}
/**
 * Sets the connection string to connect to the service. Derives the file endpoint, account name,
 * and credential (shared key or SAS token) from the parsed connection string.
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated ShareFileClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} is invalid.
 */
@Override
public ShareFileClientBuilder connectionString(String connectionString) {
    StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
    StorageEndpoint fileEndpoint = storageConnectionString.getFileEndpoint();
    if (fileEndpoint == null || fileEndpoint.getPrimaryUri() == null) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException(
            "connectionString missing required settings to derive file service endpoint."));
    }
    endpoint(fileEndpoint.getPrimaryUri());
    if (storageConnectionString.getAccountName() != null) {
        this.accountName = storageConnectionString.getAccountName();
    }
    StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
    if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
        credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
            authSettings.getAccount().getAccessKey()));
    } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
        sasToken(authSettings.getSasToken());
    }
    return this;
}
/**
 * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
 *
 * <p><strong>Note:</strong> this setting is ignored when an explicit {@link HttpPipeline} is
 * configured, since the pipeline takes precedence over the individual HTTP settings.</p>
 *
 * @param httpClient The {@link HttpClient} to use for requests.
 * @return the updated ShareFileClientBuilder object
 */
@Override
public ShareFileClientBuilder httpClient(HttpClient httpClient) {
    // Flag the (likely accidental) case of un-configuring a previously set client.
    if (httpClient == null && this.httpClient != null) {
        LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
    }
    this.httpClient = httpClient;
    return this;
}
/**
 * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. Per-call
 * policies and per-retry policies are tracked separately based on the policy's declared
 * pipeline position.
 *
 * <p><strong>Note:</strong> this setting is ignored when an explicit {@link HttpPipeline} is
 * configured, since the pipeline takes precedence over the individual HTTP settings.</p>
 *
 * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
 */
@Override
public ShareFileClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
    Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
    List<HttpPipelinePolicy> target = pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL
        ? perCallPolicies
        : perRetryPolicies;
    target.add(pipelinePolicy);
    return this;
}
/**
 * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving
 * requests to and from the service.
 *
 * <p><strong>Note:</strong> this setting is ignored when an explicit {@link HttpPipeline} is
 * configured, since the pipeline takes precedence over the individual HTTP settings.</p>
 *
 * @param logOptions The {@link HttpLogOptions logging configuration} to use.
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code logOptions} is {@code null}.
 */
@Override
public ShareFileClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    return this;
}
/**
 * Gets the default log options with Storage headers and query parameters.
 *
 * @return the default log options.
 */
public static HttpLogOptions getDefaultHttpLogOptions() {
    // Delegates to the shared storage builder helper so all storage clients log consistently.
    return BuilderHelper.getDefaultHttpLogOptions();
}
/**
 * Sets the configuration object used to retrieve environment configuration values during
 * building of the client.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated ShareFileClientBuilder object
 */
@Override
public ShareFileClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}
/**
 * Sets the request retry options for all the requests made through the client. Setting this is
 * mutually exclusive with {@link #retryOptions(RetryOptions)}.
 *
 * @param retryOptions {@link RequestRetryOptions}.
 * @return the updated ShareFileClientBuilder object.
 */
public ShareFileClientBuilder retryOptions(RequestRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}
/**
 * Sets the {@link RetryOptions} for all the requests made through the client. Setting this is
 * mutually exclusive with {@link #retryOptions(RequestRetryOptions)}.
 *
 * <p><strong>Note:</strong> this setting is ignored when an explicit {@link HttpPipeline} is
 * configured, since the pipeline takes precedence over the individual HTTP settings.</p>
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
 * @return the updated ShareFileClientBuilder object
 */
@Override
public ShareFileClientBuilder retryOptions(RetryOptions retryOptions) {
    this.coreRetryOptions = retryOptions;
    return this;
}
/**
 * Sets the {@link HttpPipeline} to use for the service client. When set, the pipeline takes
 * precedence over every other HTTP setting on this builder; note that the configured endpoint
 * is NOT part of the pipeline and must still be set.
 *
 * @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
 * @return the updated ShareFileClientBuilder object
 */
@Override
public ShareFileClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Flag the (likely accidental) case of un-configuring a previously set pipeline.
    if (httpPipeline == null && this.httpPipeline != null) {
        LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}
/**
 * Allows for setting common properties such as application ID, headers, proxy configuration,
 * etc. It is recommended to pass an instance of {@link HttpClientOptions} (a subclass of
 * {@link ClientOptions}) as it exposes additional HTTP-specific configuration.
 *
 * <p><strong>Note:</strong> this setting is ignored when an explicit {@link HttpPipeline} is
 * configured, since the pipeline takes precedence over the individual HTTP settings.</p>
 *
 * @param clientOptions A configured instance of {@link HttpClientOptions}.
 * @see HttpClientOptions
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code clientOptions} is {@code null}.
 */
@Override
public ShareFileClientBuilder clientOptions(ClientOptions clientOptions) {
    this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
    return this;
}
/**
 * Sets the {@link ShareServiceVersion} that is used when making API requests.
 *
 * <p>If unset, the latest service version known to this client library is used, which means
 * upgrading the library may implicitly move to a newer service version. Targeting a specific
 * service version may cause the service to reject newer APIs.</p>
 *
 * @param version {@link ShareServiceVersion} of the service to be used when making requests.
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder serviceVersion(ShareServiceVersion version) {
    this.version = version;
    return this;
}
/**
 * Sets whether a trailing dot is preserved (true) or trimmed (false) on the source URI.
 * Supported by x-ms-version 2022-11-02 and above.
 *
 * @param allowSourceTrailingDot the allowSourceTrailingDot value.
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder allowSourceTrailingDot(boolean allowSourceTrailingDot) {
    this.allowSourceTrailingDot = allowSourceTrailingDot;
    return this;
}
/**
 * Sets whether a trailing dot is preserved (true) or trimmed (false) on the target URI.
 * Supported by x-ms-version 2022-11-02 and above.
 *
 * @param allowTrailingDot the allowTrailingDot value.
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder allowTrailingDot(boolean allowTrailingDot) {
    this.allowTrailingDot = allowTrailingDot;
    return this;
}
/**
 * Sets the {@link ShareTokenIntent} that specifies whether there is intent for a file to be
 * backed up. Currently required when using {@link TokenCredential}; ignored for other forms of
 * authentication.
 *
 * @param shareTokenIntent the {@link ShareTokenIntent} value.
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder shareTokenIntent(ShareTokenIntent shareTokenIntent) {
    this.shareTokenIntent = shareTokenIntent;
    return this;
}
/**
* Sets the Audience to use for authentication with Azure Active Directory (AAD). The audience is not considered
* when using a shared key.
* @param audience {@link ShareAudience} to be used when requesting a token from Azure Active Directory (AAD).
* @return the updated ShareFileClientBuilder object
*/
public ShareFileClientBuilder audience(ShareAudience audience) {
this.audience = audience;
return this;
}
} | class ShareFileClientBuilder implements
TokenCredentialTrait<ShareFileClientBuilder>,
HttpTrait<ShareFileClientBuilder>,
ConnectionStringTrait<ShareFileClientBuilder>,
AzureNamedKeyCredentialTrait<ShareFileClientBuilder>,
AzureSasCredentialTrait<ShareFileClientBuilder>,
ConfigurationTrait<ShareFileClientBuilder>,
EndpointTrait<ShareFileClientBuilder> {
private static final ClientLogger LOGGER = new ClientLogger(ShareFileClientBuilder.class);
// Endpoint/resource coordinates, populated by endpoint()/connectionString() or set directly.
private String endpoint;
private String accountName;
private String shareName;
private String shareSnapshot;
private String resourcePath;
// Credential forms. Shared key / SAS credential / raw SAS token are mutually exclusive;
// this is enforced by CredentialValidator in constructImpl().
private StorageSharedKeyCredential storageSharedKeyCredential;
private AzureSasCredential azureSasCredential;
private TokenCredential tokenCredential;
private String sasToken;
// HTTP pipeline configuration; an explicitly supplied httpPipeline takes precedence over the rest.
private HttpClient httpClient;
private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions;
private RetryOptions coreRetryOptions;
private HttpPipeline httpPipeline;
private ClientOptions clientOptions = new ClientOptions();
private Configuration configuration;
// Service behavior knobs.
private ShareServiceVersion version;
private ShareTokenIntent shareTokenIntent;
private boolean allowSourceTrailingDot;
private boolean allowTrailingDot;
private ShareAudience audience;
/**
 * Creates a builder instance that is able to configure and construct {@link ShareFileClient FileClients} and {@link
 * ShareFileAsyncClient FileAsyncClients}.
 */
public ShareFileClientBuilder() {
    logOptions = getDefaultHttpLogOptions();
}
/**
 * Builds the generated {@code AzureFileStorageImpl} shared by all clients produced by this builder.
 * Validates required builder state, then either reuses an explicitly supplied pipeline or builds one
 * from the configured options.
 *
 * @return a configured {@code AzureFileStorageImpl}.
 * @throws NullPointerException if {@code shareName} or {@code resourcePath} has not been set.
 */
private AzureFileStorageImpl constructImpl() {
    Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    Objects.requireNonNull(resourcePath, "'resourcePath' cannot be null.");
    // NOTE(review): tokenCredential is passed as null here, so it is exempt from the
    // single-credential check — presumably intentional; confirm against other builders.
    CredentialValidator.validateSingleCredentialIsPresent(
        storageSharedKeyCredential, null, azureSasCredential, sasToken, LOGGER);
    // An explicitly supplied pipeline wins over all other HTTP configuration.
    HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline(
        storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken,
        endpoint, retryOptions, coreRetryOptions, logOptions,
        clientOptions, httpClient, perCallPolicies, perRetryPolicies, configuration, audience, LOGGER);
    return new AzureFileStorageImpl(pipeline, getServiceVersion().getVersion(), shareTokenIntent, endpoint,
        allowTrailingDot, allowSourceTrailingDot);
}
/**
 * Creates a {@link ShareDirectoryAsyncClient} based on options set in the builder. Every time
 * {@code buildDirectoryAsyncClient()} is called a new instance of {@link ShareDirectoryAsyncClient} is created.
 *
 * <p>
 * If an explicit pipeline is set on this {@link ShareFileClientBuilder}, it is used when constructing the
 * {@link ShareDirectoryAsyncClient client}. All other builder settings are ignored.
 * (NOTE: the original javadoc was truncated in this copy — wording reconstructed; verify upstream.)
 * </p>
 *
 * @return A ShareDirectoryAsyncClient with the options set from the builder.
 * @throws NullPointerException If {@code shareName} or {@code resourcePath} is {@code null}.
 * @throws IllegalArgumentException If no usable credential form has been specified where one is required.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
public ShareDirectoryAsyncClient buildDirectoryAsyncClient() {
    ShareServiceVersion serviceVersion = getServiceVersion();
    // A raw SAS token string takes precedence and is wrapped in an AzureSasCredential for the client.
    return new ShareDirectoryAsyncClient(constructImpl(), shareName, resourcePath,
        shareSnapshot, accountName, serviceVersion, sasToken != null ? new AzureSasCredential(sasToken) : azureSasCredential);
}
/**
 * Creates a {@link ShareDirectoryClient} based on options set in the builder. Every time
 * {@code buildDirectoryClient()} is called a new instance of {@link ShareDirectoryClient} is created.
 *
 * <p>
 * If an explicit pipeline is set on this {@link ShareFileClientBuilder}, it is used when constructing the client.
 * All other builder settings are ignored.
 * (NOTE: the original javadoc was truncated in this copy — wording reconstructed; verify upstream.)
 * </p>
 *
 * @return A ShareDirectoryClient with the options set from the builder.
 * @throws NullPointerException If {@code endpoint}, {@code shareName} or {@code directoryPath} is {@code null}.
 * @throws IllegalArgumentException If no usable credential form has been specified where one is required.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
public ShareDirectoryClient buildDirectoryClient() {
    ShareServiceVersion serviceVersion = getServiceVersion();
    return new ShareDirectoryClient(constructImpl(), shareName, resourcePath,
        shareSnapshot, accountName, serviceVersion, sasToken != null ? new AzureSasCredential(sasToken) : azureSasCredential);
}
/**
 * Creates a {@link ShareFileAsyncClient} based on options set in the builder. Every time
 * {@code buildFileAsyncClient()} is called a new instance of {@link ShareFileAsyncClient} is created.
 *
 * <p>
 * If an explicit pipeline is set on this {@link ShareFileClientBuilder}, it is used when constructing the client.
 * All other builder settings are ignored.
 * (NOTE: the original javadoc was truncated in this copy — wording reconstructed; verify upstream.)
 * </p>
 *
 * @return A ShareAsyncClient with the options set from the builder.
 * @throws NullPointerException If {@code shareName} is {@code null} or the (@code resourcePath) is {@code null}.
 * @throws IllegalArgumentException If no usable credential form has been specified where one is required.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
public ShareFileAsyncClient buildFileAsyncClient() {
    ShareServiceVersion serviceVersion = getServiceVersion();
    return new ShareFileAsyncClient(constructImpl(), shareName, resourcePath, shareSnapshot,
        accountName, serviceVersion, sasToken != null ? new AzureSasCredential(sasToken) : azureSasCredential);
}
/**
 * Creates a {@link ShareFileClient} based on options set in the builder. Every time {@code buildFileClient()} is
 * called a new instance of {@link ShareFileClient} is created.
 *
 * <p>
 * If an explicit pipeline is set on this {@link ShareFileClientBuilder}, it is used when constructing the client.
 * All other builder settings are ignored.
 * </p>
 *
 * @return A ShareFileClient with the options set from the builder.
 * @throws NullPointerException If {@code endpoint}, {@code shareName} or {@code resourcePath} is {@code null}.
 * @throws IllegalStateException If multiple credentials have been specified.
 */
public ShareFileClient buildFileClient() {
    ShareServiceVersion serviceVersion = getServiceVersion();
    // Build the generated impl and the SAS credential ONCE and share them between the async client and
    // the sync wrapper. The previous implementation invoked constructImpl() twice, validating the builder
    // state and constructing two identical pipelines/impls (and two AzureSasCredential instances).
    AzureFileStorageImpl azureFileStorage = constructImpl();
    AzureSasCredential sasCredential = sasToken != null ? new AzureSasCredential(sasToken) : azureSasCredential;
    ShareFileAsyncClient asyncClient = new ShareFileAsyncClient(azureFileStorage, shareName, resourcePath,
        shareSnapshot, accountName, serviceVersion, sasCredential);
    return new ShareFileClient(asyncClient, azureFileStorage, shareName, resourcePath, shareSnapshot,
        accountName, serviceVersion, sasCredential);
}
/**
* Sets the endpoint for the Azure Storage File instance that the client will interact with.
*
* <p>The first path segment, if the endpoint contains path segments, will be assumed to be the name of the share
* that the client will interact with. Rest of the path segments should be the path of the file. It mush end up with
* the file name if more segments exist.</p>
*
* <p>Query parameters of the endpoint will be parsed in an attempt to generate a SAS token to authenticate
* requests sent to the service.</p>
*
* @param endpoint The URL of the Azure Storage File instance to send service requests to and receive responses
* from.
* @return the updated ShareFileClientBuilder object
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or is an invalid URL
*/
@Override
public ShareFileClientBuilder endpoint(String endpoint) {
try {
URL fullUrl = new URL(endpoint);
this.endpoint = fullUrl.getProtocol() + ":
this.accountName = BuilderHelper.getAccountName(fullUrl);
String[] pathSegments = fullUrl.getPath().split("/");
int length = pathSegments.length;
this.shareName = length >= 2 ? pathSegments[1] : this.shareName;
String[] filePathParams = length >= 3 ? Arrays.copyOfRange(pathSegments, 2, length) : null;
this.resourcePath = filePathParams != null ? String.join("/", filePathParams) : this.resourcePath;
Map<String, String[]> queryParamsMap = SasImplUtils.parseQueryString(fullUrl.getQuery());
String[] snapshotArray = queryParamsMap.remove("sharesnapshot");
if (snapshotArray != null) {
this.shareSnapshot = snapshotArray[0];
}
String sasToken = new CommonSasQueryParameters(
SasImplUtils.parseQueryString(fullUrl.getQuery()), false).encode();
if (!CoreUtils.isNullOrEmpty(sasToken)) {
sasToken(sasToken);
}
} catch (MalformedURLException ex) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("The Azure Storage File endpoint url is malformed.", ex));
}
return this;
}
/**
 * Sets the share that the constructed clients will interact with
 *
 * @param shareName Name of the share
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code shareName} is {@code null}.
 */
public ShareFileClientBuilder shareName(String shareName) {
    // No null check here; constructImpl() enforces non-null at build time.
    this.shareName = shareName;
    return this;
}
/**
 * Sets the shareSnapshot that the constructed clients will interact with. This shareSnapshot must be linked to the
 * share that has been specified in the builder.
 *
 * @param snapshot Identifier of the shareSnapshot
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code shareSnapshot} is {@code null}.
 */
public ShareFileClientBuilder snapshot(String snapshot) {
    // NOTE(review): declared @throws NullPointerException but no null check is performed here
    // or (visibly) at build time — confirm whether null is actually rejected.
    this.shareSnapshot = snapshot;
    return this;
}
/**
 * Sets the file that the constructed clients will interact with
 *
 * @param resourcePath Path of the file (or directory).
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code resourcePath} is {@code null}.
 */
public ShareFileClientBuilder resourcePath(String resourcePath) {
    // No null check here; constructImpl() enforces non-null at build time.
    this.resourcePath = resourcePath;
    return this;
}
/**
 * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link StorageSharedKeyCredential}.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
public ShareFileClientBuilder credential(StorageSharedKeyCredential credential) {
    // Last-set credential wins: clear the competing authentication forms.
    this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.tokenCredential = null;
    this.sasToken = null;
    return this;
}
/**
 * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link AzureNamedKeyCredential}.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public ShareFileClientBuilder credential(AzureNamedKeyCredential credential) {
    // Adapter: converts the named-key credential and delegates to the shared-key overload.
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
}
/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
 * identity and authentication documentation for more details on proper usage of the {@link TokenCredential} type.
 * (NOTE: the original documentation link was truncated in this copy — presumably
 * https://aka.ms/azsdk/java/identity/docs; verify upstream.)
 *
 * @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public ShareFileClientBuilder credential(TokenCredential tokenCredential) {
    // Last-set credential wins: clear the competing authentication forms.
    this.tokenCredential = Objects.requireNonNull(tokenCredential, "'credential' cannot be null.");
    this.storageSharedKeyCredential = null;
    this.sasToken = null;
    return this;
}
/**
 * Sets the SAS token used to authorize requests sent to the service.
 *
 * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters
 * (with or without a leading '?') and not a full url.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code sasToken} is {@code null}.
 */
public ShareFileClientBuilder sasToken(String sasToken) {
    this.sasToken = Objects.requireNonNull(sasToken,
        "'sasToken' cannot be null.");
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    return this;
}
/**
 * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
 * @return the updated ShareFileClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public ShareFileClientBuilder credential(AzureSasCredential credential) {
    // NOTE(review): unlike the other credential setters, this one does not clear competing
    // credential forms; conflicts are rejected later by CredentialValidator in constructImpl().
    this.azureSasCredential = Objects.requireNonNull(credential,
        "'credential' cannot be null.");
    return this;
}
/**
 * Configures this builder from a Storage connection string: derives the file service endpoint and, when
 * available, the account name plus either a shared-key credential or a SAS token.
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated ShareFileClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} in invalid.
 */
@Override
public ShareFileClientBuilder connectionString(String connectionString) {
    StorageConnectionString parsed = StorageConnectionString.create(connectionString, LOGGER);
    StorageEndpoint fileEndpoint = parsed.getFileEndpoint();
    boolean endpointMissing = fileEndpoint == null || fileEndpoint.getPrimaryUri() == null;
    if (endpointMissing) {
        throw LOGGER
            .logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive file service endpoint."));
    }
    // endpoint() also derives an account name from the URL; an explicit account name in the
    // connection string (applied after) takes precedence.
    this.endpoint(fileEndpoint.getPrimaryUri());
    String parsedAccountName = parsed.getAccountName();
    if (parsedAccountName != null) {
        this.accountName = parsedAccountName;
    }
    StorageAuthenticationSettings auth = parsed.getStorageAuthSettings();
    if (StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY == auth.getType()) {
        this.credential(new StorageSharedKeyCredential(auth.getAccount().getName(),
            auth.getAccount().getAccessKey()));
    } else if (StorageAuthenticationSettings.Type.SAS_TOKEN == auth.getType()) {
        this.sasToken(auth.getSasToken());
    }
    return this;
}
/**
 * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
 *
 * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
 * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
 * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
 * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
 * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
 * documentation of types that implement this trait to understand the full set of implications.</p>
 *
 * @param httpClient The {@link HttpClient} to use for requests.
 * @return the updated ShareFileClientBuilder object
 */
@Override
public ShareFileClientBuilder httpClient(HttpClient httpClient) {
    // Null is accepted (resets to default client); log when it clears a previous configuration.
    if (this.httpClient != null && httpClient == null) {
        LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
    }
    this.httpClient = httpClient;
    return this;
}
/**
 * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. The policy is bucketed by its
 * {@link HttpPipelinePosition}: per-call policies run once per API call, per-retry policies run on every retry.
 *
 * <p><strong>Note:</strong> policies added here are ignored when an explicit {@link HttpPipeline} is supplied,
 * since that pipeline takes precedence over all other HTTP configuration on this builder.</p>
 *
 * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
 */
@Override
public ShareFileClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
    Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
    List<HttpPipelinePolicy> bucket =
        pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL
            ? perCallPolicies
            : perRetryPolicies;
    bucket.add(pipelinePolicy);
    return this;
}
/**
 * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
 * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel#NONE} is set.
 * (NOTE: the default-level cross-reference was truncated in this copy — reconstructed; verify upstream.)
 *
 * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
 * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
 * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
 * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
 * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
 * documentation of types that implement this trait to understand the full set of implications.</p>
 *
 * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
 * and from the service.
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code logOptions} is {@code null}.
 */
@Override
public ShareFileClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    return this;
}
/**
 * Gets the default log options with Storage headers and query parameters.
 *
 * @return the default log options.
 */
public static HttpLogOptions getDefaultHttpLogOptions() {
    return BuilderHelper.getDefaultHttpLogOptions();
}
/**
 * Sets the configuration object used to retrieve environment configuration values during building of the client.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated ShareFileClientBuilder object
 */
@Override
public ShareFileClientBuilder configuration(Configuration configuration) {
    // Null is accepted; presumably the global configuration is used at build time — TODO confirm.
    this.configuration = configuration;
    return this;
}
/**
 * Sets the request retry options for all the requests made through the client.
 *
 * Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.
 * (NOTE: the cross-reference was truncated in this copy — reconstructed; verify upstream.)
 *
 * @param retryOptions {@link RequestRetryOptions}.
 * @return the updated ShareFileClientBuilder object.
 */
public ShareFileClientBuilder retryOptions(RequestRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}
/**
 * Sets the {@link RetryOptions} for all the requests made through the client.
 *
 * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
 * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
 * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
 * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
 * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
 * documentation of types that implement this trait to understand the full set of implications.</p>
 * <p>
 * Setting this is mutually exclusive with using {@link #retryOptions(RequestRetryOptions)}.
 * Consider using that overload to also set storage-specific retry options.
 * (NOTE: the cross-references were truncated in this copy — reconstructed; verify upstream.)
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
 * @return the updated ShareFileClientBuilder object
 */
@Override
public ShareFileClientBuilder retryOptions(RetryOptions retryOptions) {
    this.coreRetryOptions = retryOptions;
    return this;
}
/**
 * Sets the {@link HttpPipeline} to use for the service client.
 *
 * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
 * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
 * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
 * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
 * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
 * documentation of types that implement this trait to understand the full set of implications.</p>
 * <p>
 * The {@link #endpoint(String) endpoint} is not ignored when {@code pipeline} is set.
 * (NOTE: this sentence was truncated in this copy — reconstructed; verify upstream.)
 *
 * @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses.
 * @return the updated ShareFileClientBuilder object
 */
@Override
public ShareFileClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Null is accepted (falls back to the internally built pipeline); log when it clears a previous one.
    if (this.httpPipeline != null && httpPipeline == null) {
        LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}
/**
 * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is
 * recommended that this method be called with an instance of the {@link HttpClientOptions}
 * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more
 * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait
 * interface.
 *
 * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In
 * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and
 * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally
 * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this
 * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the
 * documentation of types that implement this trait to understand the full set of implications.</p>
 *
 * @param clientOptions A configured instance of {@link HttpClientOptions}.
 * @see HttpClientOptions
 * @return the updated ShareFileClientBuilder object
 * @throws NullPointerException If {@code clientOptions} is {@code null}.
 */
@Override
public ShareFileClientBuilder clientOptions(ClientOptions clientOptions) {
    this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
    return this;
}
/**
 * Sets the {@link ShareServiceVersion} that is used when making API requests.
 * <p>
 * If a service version is not provided, the service version that will be used will be the latest known service
 * version based on the version of the client library being used. If no service version is specified, updating to a
 * newer version of the client library will have the result of potentially moving to a newer service version.
 * <p>
 * Targeting a specific service version may also mean that the service will return an error for newer APIs.
 *
 * @param version {@link ShareServiceVersion} of the service to be used when making requests.
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder serviceVersion(ShareServiceVersion version) {
    this.version = version;
    return this;
}
/**
 * Set the trailing dot property to specify whether trailing dot will be trimmed or not from the source URI.
 *
 * If set to true, trailing dot (.) will be allowed to suffix directory and file names.
 * If false, the trailing dot will be trimmed. Supported by x-ms-version 2022-11-02 and above.
 *
 * @param allowSourceTrailingDot the allowSourceTrailingDot value.
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder allowSourceTrailingDot(boolean allowSourceTrailingDot) {
    this.allowSourceTrailingDot = allowSourceTrailingDot;
    return this;
}
/**
 * Set the trailing dot property to specify whether trailing dot will be trimmed or not from the target URI.
 *
 * If set to true, trailing dot (.) will be allowed to suffix directory and file names.
 * If false, the trailing dot will be trimmed. Supported by x-ms-version 2022-11-02 and above.
 *
 * @param allowTrailingDot the allowTrailingDot value.
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder allowTrailingDot(boolean allowTrailingDot) {
    this.allowTrailingDot = allowTrailingDot;
    return this;
}
/**
 * Sets the {@link ShareTokenIntent} that specifies whether there is intent for a file to be backed up.
 * This is currently required when using {@link TokenCredential}, and ignored for other forms of authentication.
 *
 * @param shareTokenIntent the {@link ShareTokenIntent} value.
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder shareTokenIntent(ShareTokenIntent shareTokenIntent) {
    this.shareTokenIntent = shareTokenIntent;
    return this;
}
/**
 * Sets the Audience to use for authentication with Azure Active Directory (AAD). The audience is not considered
 * when using a shared key.
 * @param audience {@link ShareAudience} to be used when requesting a token from Azure Active Directory (AAD).
 * @return the updated ShareFileClientBuilder object
 */
public ShareFileClientBuilder audience(ShareAudience audience) {
    this.audience = audience;
    return this;
}
} |
This timeout implementation is wrong: the timeout is applied to the creation of the PagedIterable, which is instantaneous; it should instead be applied to each individual get-page request. | public PagedIterable<ShareItem> listShares(ListSharesOptions options, Duration timeout, Context context) {
Supplier<PagedIterable<ShareItem>> operation = () -> listShares(options, context);
try {
return timeout != null
? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
} | return timeout != null | public PagedIterable<ShareItem> listShares(ListSharesOptions options, Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
final String prefix = (options != null) ? options.getPrefix() : null;
final Integer maxResultsPerPage = (options != null) ? options.getMaxResultsPerPage() : null;
List<ListSharesIncludeType> include = new ArrayList<>();
if (options != null) {
if (options.isIncludeDeleted()) {
include.add(ListSharesIncludeType.DELETED);
}
if (options.isIncludeMetadata()) {
include.add(ListSharesIncludeType.METADATA);
}
if (options.isIncludeSnapshots()) {
include.add(ListSharesIncludeType.SNAPSHOTS);
}
}
BiFunction<String, Integer, PagedResponse<ShareItem>> retriever =
(nextMarker, pageSize) -> {
Callable<PagedResponse<ShareItemInternal>> operation = () -> this.azureFileStorageClient.getServices()
.listSharesSegmentSinglePage(prefix, nextMarker, pageSize == null ? maxResultsPerPage : pageSize,
include, null, finalContext);
try {
PagedResponse<ShareItemInternal> response = timeout != null
? THREAD_POOL.submit(operation).get(timeout.toMillis(), TimeUnit.MILLISECONDS)
: operation.call();
List<ShareItem> value = response.getValue() == null ? Collections.emptyList()
: response.getValue().stream().map(ModelHelper::populateShareItem).collect(Collectors.toList());
return new PagedResponseBase<>(
response.getRequest(),
response.getStatusCode(),
response.getHeaders(),
value,
response.getContinuationToken(),
ModelHelper.transformListSharesHeaders(response.getHeaders()));
} catch (Exception e) {
throw LOGGER.logExceptionAsError(new RuntimeException("Failed to retrieve shares with timeout.", e));
}
};
return new PagedIterable<>(pageSize -> retriever.apply(null, pageSize), retriever);
} | class ShareServiceClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareServiceClient.class);
// Generated client that performs the actual service calls; shared with child clients handed out below.
private final AzureFileStorageImpl azureFileStorageClient;
private final String accountName;
private final ShareServiceVersion serviceVersion;
// SAS credential propagated to child clients (may be null when another auth form is in use).
private final AzureSasCredential sasToken;
/**
 * Creates a ShareServiceClient.
 * @param azureFileStorage Client that interacts with the service interfaces
 * @param accountName Name of the account
 * @param serviceVersion The version of the service to be used when making requests.
 * @param sasToken The SAS token used to authenticate the request
 */
ShareServiceClient(AzureFileStorageImpl azureFileStorage, String accountName, ShareServiceVersion serviceVersion,
    AzureSasCredential sasToken) {
    this.azureFileStorageClient = azureFileStorage;
    this.accountName = accountName;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
}
/**
 * Get the url of the storage file service client.
 *
 * @return the url of the Storage File service.
 */
public String getFileServiceUrl() {
    return azureFileStorageClient.getUrl();
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public ShareServiceVersion getServiceVersion() {
    return serviceVersion;
}
/**
 * Constructs a ShareClient that interacts with the specified share.
 *
 * <p>If the share doesn't exist in the storage account, the share will need to be created (via the returned
 * {@link ShareClient}) before interaction with the share can happen.
 * (NOTE: the original javadoc was truncated in this copy — wording reconstructed; verify upstream.)</p>
 *
 * @param shareName Name of the share
 * @return a ShareClient that interacts with the specified share
 */
public ShareClient getShareClient(String shareName) {
    // The child client shares this client's generated impl, account name, version and SAS state.
    return new ShareClient(azureFileStorageClient, shareName, null, accountName, serviceVersion, sasToken);
}
/**
 * Lists all shares in the storage account without their metadata or snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>List all shares in the account</p>
 *
 * <!-- src_embed com.azure.storage.file.share.ShareServiceClient.listShares -->
 * (NOTE: the embedded code sample was garbled in this copy of the source; see the upstream
 * samples for com.azure.storage.file.share.ShareServiceClient.listShares.)
 * <!-- end com.azure.storage.file.share.ShareServiceClient.listShares -->
 *
 * <p>For more information, see the Azure Docs (link truncated in this copy of the source).</p>
 *
 * @return {@link ShareItem Shares} in the storage account without their metadata or snapshots
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareItem> listShares() {
    // Delegates to the full overload with no filter options, no timeout, and no context.
    return listShares(null, null, null);
}
/**
 * Lists the shares in the Storage account that pass the options filter.
 *
 * <p>Set a starts-with name filter using {@link ListSharesOptions#setPrefix(String) prefix} to restrict the shares
 * that are listed.</p>
 *
 * <p>Pass true to {@link ListSharesOptions#setIncludeMetadata(boolean) includeMetadata} to have metadata returned
 * for the shares.</p>
 *
 * <p>Pass true to {@link ListSharesOptions#setIncludeSnapshots(boolean) includeSnapshots} to have snapshots of the
 * shares listed.</p>
 *
 * (NOTE: the original javadoc's code samples and links were garbled in this copy, and its {@code @param timeout}
 * documented the public timeout-taking overload rather than this package-private one — reconstructed; verify
 * upstream.)
 *
 * @param options Options for listing shares. If iterating by page, the page size passed to byPage methods such as
 * {@link PagedIterable#iterableByPage(int)} will be preferred over the value set on these options.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return {@link ShareItem Shares} in the storage account that satisfy the filter requirements
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
PagedIterable<ShareItem> listShares(ListSharesOptions options, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    // Extract filter settings from the (possibly null) options.
    final String prefix = (options != null) ? options.getPrefix() : null;
    final Integer maxResultsPerPage = (options != null) ? options.getMaxResultsPerPage() : null;
    List<ListSharesIncludeType> include = new ArrayList<>();
    if (options != null) {
        if (options.isIncludeDeleted()) {
            include.add(ListSharesIncludeType.DELETED);
        }
        if (options.isIncludeMetadata()) {
            include.add(ListSharesIncludeType.METADATA);
        }
        if (options.isIncludeSnapshots()) {
            include.add(ListSharesIncludeType.SNAPSHOTS);
        }
    }
    // Page retriever: fetches one segment and maps internal items/headers to the public models.
    // An explicit byPage page size overrides maxResultsPerPage from the options.
    BiFunction<String, Integer, PagedResponse<ShareItem>> retriever =
        (nextMarker, pageSize) -> {
            PagedResponse<ShareItemInternal> response = this.azureFileStorageClient.getServices().listSharesSegmentSinglePage(
                prefix, nextMarker, pageSize == null ? maxResultsPerPage : pageSize, include, null, finalContext);
            List<ShareItem> value = response.getValue() == null ? Collections.emptyList()
                : response.getValue().stream().map(ModelHelper::populateShareItem).collect(Collectors.toList());
            return new PagedResponseBase<>(
                response.getRequest(),
                response.getStatusCode(),
                response.getHeaders(),
                value,
                response.getContinuationToken(),
                ModelHelper.transformListSharesHeaders(response.getHeaders()));
        };
    return new PagedIterable<>(pageSize -> retriever.apply(null, pageSize), retriever);
}
/**
* Retrieves the properties of the storage account's File service. The properties range from storage analytics and
* metrics to CORS (Cross-Origin Resource Sharing).
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve File service properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.getProperties -->
* <pre>
* ShareServiceProperties properties = fileServiceClient.getProperties&
* System.out.printf&
* properties.getMinuteMetrics&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
* Docs</a>.</p>
*
* @return Storage account {@link ShareServiceProperties File service properties}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareServiceProperties getProperties() {
return getPropertiesWithResponse(null, Context.NONE).getValue();
}
/**
* Retrieves the properties of the storage account's File service. The properties range from storage analytics and
* metrics to CORS (Cross-Origin Resource Sharing).
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve File service properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.getPropertiesWithResponse
* <pre>
* ShareServiceProperties properties = fileServiceClient.getPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* properties.getMinuteMetrics&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a>.</p>
*
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the Storage account {@link ShareServiceProperties File service properties} with
* headers and response status code
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareServiceProperties> getPropertiesWithResponse(Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
try {
Supplier<ResponseBase<ServicesGetPropertiesHeaders, ShareServiceProperties>> operation = () ->
this.azureFileStorageClient.getServices().getPropertiesWithResponse(null, finalContext);
ResponseBase<ServicesGetPropertiesHeaders, ShareServiceProperties> response = timeout != null
? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
return new SimpleResponse<>(response, response.getValue());
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
* Sets the properties for the storage account's File service. The properties range from storage analytics and
* metric to CORS (Cross-Origin Resource Sharing).
*
* To maintain the CORS in the Queue service pass a {@code null} value for {@link ShareServiceProperties
* CORS}. To disable all CORS in the Queue service pass an empty list for {@link ShareServiceProperties
* CORS}.
*
* <p><strong>Code Sample</strong></p>
*
* <p>Clear CORS in the File service</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
* <pre>
* ShareServiceProperties properties = fileServiceClient.getProperties&
* properties.setCors&
*
* Response<Void> response = fileServiceClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
*
* <p>Enable Minute and Hour Metrics</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.setProperties
* <pre>
* ShareServiceProperties properties = fileServiceClient.getProperties&
*
* properties.getMinuteMetrics&
* properties.getHourMetrics&
*
* fileServiceClient.setProperties&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.setProperties
*
* <p>For more information, see the
* <a href="https:
* Docs</a>.</p>
*
* @param properties Storage account File service properties
* @throws ShareStorageException When one of the following is true
* <ul>
* <li>A CORS rule is missing one of its fields</li>
* <li>More than five CORS rules will exist for the Queue service</li>
* <li>Size of all CORS rules exceeds 2KB</li>
* <li>
* Length of {@link ShareCorsRule
* exposed headers}, or {@link ShareCorsRule
* </li>
* <li>{@link ShareCorsRule
* PUT</li>
* </ul>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setProperties(ShareServiceProperties properties) {
setPropertiesWithResponse(properties, null, Context.NONE);
}
/**
* Sets the properties for the storage account's File service. The properties range from storage analytics and
* metric to CORS (Cross-Origin Resource Sharing).
*
* To maintain the CORS in the Queue service pass a {@code null} value for {@link ShareServiceProperties
* CORS}. To disable all CORS in the Queue service pass an empty list for {@link ShareServiceProperties
* CORS}.
*
* <p><strong>Code Sample</strong></p>
*
* <p>Clear CORS in the File service</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
* <pre>
* ShareServiceProperties properties = fileServiceClient.getProperties&
* properties.setCors&
*
* Response<Void> response = fileServiceClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
*
* <p>Enable Minute and Hour Metrics</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
* <pre>
* ShareServiceProperties properties = fileServiceClient.getPropertiesWithResponse&
* Duration.ofSeconds&
*
* properties.getMinuteMetrics&
* properties.getHourMetrics&
*
* Response<Void> response = fileServiceClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a>.</p>
*
* @param properties Storage account File service properties
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that only contains headers and response status code
* @throws ShareStorageException When one of the following is true
* <ul>
* <li>A CORS rule is missing one of its fields</li>
* <li>More than five CORS rules will exist for the Queue service</li>
* <li>Size of all CORS rules exceeds 2KB</li>
* <li>
* Length of {@link ShareCorsRule
* exposed headers}, or {@link ShareCorsRule
* </li>
* <li>{@link ShareCorsRule
* PUT</li>
* </ul>
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setPropertiesWithResponse(ShareServiceProperties properties, Duration timeout,
Context context) {
Context finalContext = context == null ? Context.NONE : context;
try {
Supplier<ResponseBase<ServicesSetPropertiesHeaders, Void>> operation = () ->
this.azureFileStorageClient.getServices().setPropertiesWithResponse(properties, null, finalContext);
return timeout != null
? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
* Creates a share in the storage account with the specified name and returns a ShareClient to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the share with share name of "myshare"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.createShare
* <pre>
* fileServiceClient.createShare&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.createShare
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @return The {@link ShareClient ShareClient}
* @throws ShareStorageException If a share with the same name already exists
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareClient createShare(String shareName) {
return createShareWithResponse(shareName, null, null, null, Context.NONE).getValue();
}
/**
* Creates a share in the storage account with the specified name and metadata and returns a ShareClient to interact
* with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the share "test" with a quota of 10 GB</p>
*
* <!-- src_embed ShareServiceClient.createShareWithResponse
* <pre>
* Response<ShareClient> response = fileServiceClient.createShareWithResponse&
* Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end ShareServiceClient.createShareWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @param metadata Optional metadata to associate with the share
* @param quotaInGB Optional maximum size the share is allowed to grow to in GB. This must be greater than 0 and
* less than or equal to 5120. The default value is 5120.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareClient ShareClient} and the status of creating the share.
* @throws ShareStorageException If a share with the same name already exists or {@code quotaInGB} is outside the
* allowed range.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareClient> createShareWithResponse(String shareName, Map<String, String> metadata,
Integer quotaInGB, Duration timeout, Context context) {
ShareClient shareClient = getShareClient(shareName);
return new SimpleResponse<>(shareClient.createWithResponse(metadata, quotaInGB, timeout, context), shareClient);
}
/**
* Creates a share in the storage account with the specified name and options and returns a ShareClient to interact
* with it.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed ShareServiceClient.createShareWithResponse
* <pre>
* Response<ShareClient> response = fileServiceClient.createShareWithResponse&
* new ShareCreateOptions&
* .setAccessTier&
* System.out.printf&
* </pre>
* <!-- end ShareServiceClient.createShareWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @param options {@link ShareCreateOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareClient ShareClient} and the status of creating the share.
* @throws ShareStorageException If a share with the same name already exists or {@code quotaInGB} is outside the
* allowed range.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareClient> createShareWithResponse(String shareName, ShareCreateOptions options,
Duration timeout, Context context) {
ShareClient shareClient = getShareClient(shareName);
return new SimpleResponse<>(shareClient.createWithResponse(options, timeout, context), shareClient);
}
/**
* Deletes the share in the storage account with the given name
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the share "test"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.deleteShare
* <pre>
* fileServiceClient.deleteShare&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.deleteShare
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @throws ShareStorageException If the share doesn't exist
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteShare(String shareName) {
deleteShareWithResponse(shareName, null, null, Context.NONE);
}
/**
* Deletes the specific snapshot of the share in the storage account with the given name. Snapshot are identified by
* the time they were created.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the snapshot of share "test" that was created at current time. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.deleteShareWithResponse
* <pre>
* OffsetDateTime midnight = OffsetDateTime.of&
* Response<Void> response = fileServiceClient.deleteShareWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.deleteShareWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @param snapshot Identifier of the snapshot
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that only contains headers and response status code
* @throws ShareStorageException If the share doesn't exist or the snapshot doesn't exist
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteShareWithResponse(String shareName, String snapshot, Duration timeout,
Context context) {
Context finalContext = context == null ? Context.NONE : context;
DeleteSnapshotsOptionType deleteSnapshots = CoreUtils.isNullOrEmpty(snapshot)
? DeleteSnapshotsOptionType.INCLUDE : null;
try {
Supplier<ResponseBase<SharesDeleteHeaders, Void>> operation = () -> this.azureFileStorageClient.getShares()
.deleteWithResponse(shareName, snapshot, null, deleteSnapshots, null, finalContext);
return timeout != null
? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return this.accountName;
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return this.azureFileStorageClient.getHttpPipeline();
}
/**
* Generates an account SAS for the Azure Storage account using the specified {@link AccountSasSignatureValues}.
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link AccountSasSignatureValues} for more information on how to construct an account SAS.</p>
*
* <p><strong>Generating an account SAS</strong></p>
* <p>The snippet below generates an AccountSasSignatureValues object that lasts for two days and gives the user
* read and list access to blob and file shares.</p>
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.generateAccountSas
* <pre>
* AccountSasPermission permissions = new AccountSasPermission&
* .setListPermission&
* .setReadPermission&
* AccountSasResourceType resourceTypes = new AccountSasResourceType&
* AccountSasService services = new AccountSasService&
* OffsetDateTime expiryTime = OffsetDateTime.now&
*
* AccountSasSignatureValues sasValues =
* new AccountSasSignatureValues&
*
* &
* String sas = fileServiceClient.generateAccountSas&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.generateAccountSas
*
* @param accountSasSignatureValues {@link AccountSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateAccountSas(AccountSasSignatureValues accountSasSignatureValues) {
return this.generateAccountSas(accountSasSignatureValues, Context.NONE);
}
/**
* Generates an account SAS for the Azure Storage account using the specified {@link AccountSasSignatureValues}.
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link AccountSasSignatureValues} for more information on how to construct an account SAS.</p>
*
* <p>The snippet below generates a SAS that lasts for two days and gives the user read and list access to blob
* containers and file shares.</p>
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.generateAccountSas
* <pre>
* AccountSasPermission permissions = new AccountSasPermission&
* .setListPermission&
* .setReadPermission&
* AccountSasResourceType resourceTypes = new AccountSasResourceType&
* AccountSasService services = new AccountSasService&
* OffsetDateTime expiryTime = OffsetDateTime.now&
*
* AccountSasSignatureValues sasValues =
* new AccountSasSignatureValues&
*
* &
* String sas = fileServiceClient.generateAccountSas&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.generateAccountSas
*
* @param accountSasSignatureValues {@link AccountSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateAccountSas(AccountSasSignatureValues accountSasSignatureValues, Context context) {
return new AccountSasImplUtil(accountSasSignatureValues, null)
.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
/**
* Restores a previously deleted share.
* <p>
* If the share associated with provided <code>deletedShareName</code>
* already exists, this call will result in a 409 (conflict).
* </p>
* <p>
* This API is only functional if Share Soft Delete is enabled
* for the storage account associated with the share.
* For more information, see the
* <a href="TBD">Azure Docs</a>.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.undeleteShare
* <pre>
* ListSharesOptions listSharesOptions = new ListSharesOptions&
* listSharesOptions.setIncludeDeleted&
* fileServiceClient.listShares&
* deletedShare -> &
* ShareClient shareClient = fileServiceClient.undeleteShare&
* deletedShare.getName&
* &
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.undeleteShare
*
* <p>For more information, see the
* <a href="https:
*
* @param deletedShareName The name of the previously deleted share.
* @param deletedShareVersion The version of the previously deleted share.
* @return A {@link ShareClient} used
* to interact with the restored share.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareClient undeleteShare(String deletedShareName, String deletedShareVersion) {
return this.undeleteShareWithResponse(deletedShareName, deletedShareVersion, null, Context.NONE)
.getValue();
}
/**
* Restores a previously deleted share.
* <p>
* If the share associated with provided <code>deletedShareName</code>
* already exists, this call will result in a 409 (conflict).
* </p>
* <p>
* This API is only functional if Share Soft Delete is enabled
* for the storage account associated with the share.
* For more information, see the
* <a href="TBD">Azure Docs</a>.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.undeleteShareWithResponse
* <pre>
* ListSharesOptions listSharesOptions = new ListSharesOptions&
* listSharesOptions.setIncludeDeleted&
* fileServiceClient.listShares&
* deletedShare -> &
* ShareClient shareClient = fileServiceClient.undeleteShareWithResponse&
* deletedShare.getName&
* &
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.undeleteShareWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deletedShareName The name of the previously deleted share.
* @param deletedShareVersion The version of the previously deleted share.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} whose {@link Response
* to interact with the restored share.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareClient> undeleteShareWithResponse(String deletedShareName, String deletedShareVersion,
Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
try {
Supplier<ResponseBase<SharesRestoreHeaders, Void>> operation = () -> this.azureFileStorageClient.getShares()
.restoreWithResponse(deletedShareName, null, null, deletedShareName, deletedShareVersion, finalContext);
ResponseBase<SharesRestoreHeaders, Void> response = timeout != null
? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
return new SimpleResponse<>(response, getShareClient(deletedShareName));
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
} | class ShareServiceClient {
private static final ClientLogger LOGGER = new ClientLogger(ShareServiceClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
/**
* Creates a ShareServiceClient.
* @param azureFileStorage Client that interacts with the service interfaces
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareServiceClient(AzureFileStorageImpl azureFileStorage, String accountName, ShareServiceVersion serviceVersion,
AzureSasCredential sasToken) {
this.azureFileStorageClient = azureFileStorage;
this.accountName = accountName;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
}
/**
* Get the url of the storage file service client.
*
* @return the url of the Storage File service.
*/
public String getFileServiceUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return serviceVersion;
}
/**
* Constructs a ShareClient that interacts with the specified share.
*
* <p>If the share doesn't exist in the storage account {@link ShareClient
* shareServiceAsyncClient will
* need to be called before interaction with the share can happen.</p>
*
* @param shareName Name of the share
* @return a ShareClient that interacts with the specified share
*/
public ShareClient getShareClient(String shareName) {
return new ShareClient(azureFileStorageClient, shareName, null, accountName, serviceVersion, sasToken);
}
/**
* Lists all shares in the storage account without their metadata or snapshots.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all shares in the account</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.listShares -->
* <pre>
* fileServiceClient.listShares&
* shareItem -> System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.listShares -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link ShareItem Shares} in the storage account without their metadata or snapshots
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareItem> listShares() {
return listShares(null, null, null);
}
/**
* Lists the shares in the Storage account that pass the options filter.
*
* <p>Set starts with name filter using {@link ListSharesOptions
* are
* listed.</p>
*
* <p>Pass true to {@link ListSharesOptions
* for
* the shares.</p>
*
* <p>Pass true to {@link ListSharesOptions
* shares listed.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all shares that begin with "azure"</p>
*
* <!-- src_embed ShareServiceClient.listShares
* <pre>
* fileServiceClient.listShares&
* new Context&
* shareItem -> System.out.printf&
* &
* </pre>
* <!-- end ShareServiceClient.listShares
*
* <p>List all shares including their snapshots and metadata</p>
*
* <!-- src_embed ShareServiceClient.listShares
* <pre>
* fileServiceClient.listShares&
* .setIncludeSnapshots&
* shareItem -> System.out.printf&
* &
* </pre>
* <!-- end ShareServiceClient.listShares
*
* <p>For more information, see the
* <a href="https:
*
* @param options Options for listing shares. If iterating by page, the page size passed to byPage methods such as
* {@link PagedIterable
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareItem Shares} in the storage account that satisfy the filter requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
/**
* Retrieves the properties of the storage account's File service. The properties range from storage analytics and
* metrics to CORS (Cross-Origin Resource Sharing).
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve File service properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.getProperties -->
* <pre>
* ShareServiceProperties properties = fileServiceClient.getProperties&
* System.out.printf&
* properties.getMinuteMetrics&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
* Docs</a>.</p>
*
* @return Storage account {@link ShareServiceProperties File service properties}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareServiceProperties getProperties() {
return getPropertiesWithResponse(null, Context.NONE).getValue();
}
/**
* Retrieves the properties of the storage account's File service. The properties range from storage analytics and
* metrics to CORS (Cross-Origin Resource Sharing).
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve File service properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.getPropertiesWithResponse
* <pre>
* ShareServiceProperties properties = fileServiceClient.getPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* properties.getMinuteMetrics&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a>.</p>
*
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the Storage account {@link ShareServiceProperties File service properties} with
* headers and response status code
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareServiceProperties> getPropertiesWithResponse(Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
Callable<ResponseBase<ServicesGetPropertiesHeaders, ShareServiceProperties>> operation = () ->
this.azureFileStorageClient.getServices().getPropertiesWithResponse(null, finalContext);
ResponseBase<ServicesGetPropertiesHeaders, ShareServiceProperties> response =
StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
return new SimpleResponse<>(response, response.getValue());
}
/**
* Sets the properties for the storage account's File service. The properties range from storage analytics and
* metric to CORS (Cross-Origin Resource Sharing).
*
* To maintain the CORS in the Queue service pass a {@code null} value for {@link ShareServiceProperties
* CORS}. To disable all CORS in the Queue service pass an empty list for {@link ShareServiceProperties
* CORS}.
*
* <p><strong>Code Sample</strong></p>
*
* <p>Clear CORS in the File service</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
* <pre>
* ShareServiceProperties properties = fileServiceClient.getProperties&
* properties.setCors&
*
* Response<Void> response = fileServiceClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
*
* <p>Enable Minute and Hour Metrics</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.setProperties
* <pre>
* ShareServiceProperties properties = fileServiceClient.getProperties&
*
* properties.getMinuteMetrics&
* properties.getHourMetrics&
*
* fileServiceClient.setProperties&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.setProperties
*
* <p>For more information, see the
* <a href="https:
* Docs</a>.</p>
*
* @param properties Storage account File service properties
* @throws ShareStorageException When one of the following is true
* <ul>
* <li>A CORS rule is missing one of its fields</li>
* <li>More than five CORS rules will exist for the Queue service</li>
* <li>Size of all CORS rules exceeds 2KB</li>
* <li>
* Length of {@link ShareCorsRule
* exposed headers}, or {@link ShareCorsRule
* </li>
* <li>{@link ShareCorsRule
* PUT</li>
* </ul>
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setProperties(ShareServiceProperties properties) {
    // Delegate to the response-returning overload with no timeout and an empty context;
    // the response itself is intentionally discarded.
    this.setPropertiesWithResponse(properties, null, Context.NONE);
}
/**
* Sets the properties for the storage account's File service. The properties range from storage analytics and
* metric to CORS (Cross-Origin Resource Sharing).
*
* To maintain the CORS in the Queue service pass a {@code null} value for {@link ShareServiceProperties
* CORS}. To disable all CORS in the Queue service pass an empty list for {@link ShareServiceProperties
* CORS}.
*
* <p><strong>Code Sample</strong></p>
*
* <p>Clear CORS in the File service</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
* <pre>
* ShareServiceProperties properties = fileServiceClient.getProperties&
* properties.setCors&
*
* Response<Void> response = fileServiceClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
*
* <p>Enable Minute and Hour Metrics</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
* <pre>
* ShareServiceProperties properties = fileServiceClient.getPropertiesWithResponse&
* Duration.ofSeconds&
*
* properties.getMinuteMetrics&
* properties.getHourMetrics&
*
* Response<Void> response = fileServiceClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
* Docs</a>.</p>
*
* @param properties Storage account File service properties
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that only contains headers and response status code
* @throws ShareStorageException When one of the following is true
* <ul>
* <li>A CORS rule is missing one of its fields</li>
* <li>More than five CORS rules will exist for the Queue service</li>
* <li>Size of all CORS rules exceeds 2KB</li>
* <li>
* Length of {@link ShareCorsRule
* exposed headers}, or {@link ShareCorsRule
* </li>
* <li>{@link ShareCorsRule
* PUT</li>
* </ul>
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setPropertiesWithResponse(ShareServiceProperties properties, Duration timeout,
    Context context) {
    // The generated layer must never see a null context.
    Context requestContext = (context == null) ? Context.NONE : context;
    Callable<ResponseBase<ServicesSetPropertiesHeaders, Void>> call = () -> this.azureFileStorageClient
        .getServices()
        .setPropertiesWithResponse(properties, null, requestContext);
    // sendRequest applies the optional timeout and maps service errors to ShareStorageException.
    return StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class);
}
/**
* Creates a share in the storage account with the specified name and returns a ShareClient to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the share with share name of "myshare"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.createShare
* <pre>
* fileServiceClient.createShare&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.createShare
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @return The {@link ShareClient ShareClient}
* @throws ShareStorageException If a share with the same name already exists
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareClient createShare(String shareName) {
    // Create with service defaults (no metadata, no quota) and unwrap the value.
    Response<ShareClient> response = createShareWithResponse(shareName, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a share in the storage account with the specified name and metadata and returns a ShareClient to interact
* with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the share "test" with a quota of 10 GB</p>
*
* <!-- src_embed ShareServiceClient.createShareWithResponse
* <pre>
* Response<ShareClient> response = fileServiceClient.createShareWithResponse&
* Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end ShareServiceClient.createShareWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @param metadata Optional metadata to associate with the share
* @param quotaInGB Optional maximum size the share is allowed to grow to in GB. This must be greater than 0 and
* less than or equal to 5120. The default value is 5120.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareClient ShareClient} and the status of creating the share.
* @throws ShareStorageException If a share with the same name already exists or {@code quotaInGB} is outside the
* allowed range.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareClient> createShareWithResponse(String shareName, Map<String, String> metadata,
    Integer quotaInGB, Duration timeout, Context context) {
    // Create through a share-scoped client, then pair the raw response with that client
    // so the caller can keep interacting with the new share.
    ShareClient client = getShareClient(shareName);
    Response<?> created = client.createWithResponse(metadata, quotaInGB, timeout, context);
    return new SimpleResponse<>(created, client);
}
/**
* Creates a share in the storage account with the specified name and options and returns a ShareClient to interact
* with it.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed ShareServiceClient.createShareWithResponse
* <pre>
* Response<ShareClient> response = fileServiceClient.createShareWithResponse&
* new ShareCreateOptions&
* .setAccessTier&
* System.out.printf&
* </pre>
* <!-- end ShareServiceClient.createShareWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @param options {@link ShareCreateOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareClient ShareClient} and the status of creating the share.
* @throws ShareStorageException If a share with the same name already exists or {@code quotaInGB} is outside the
* allowed range.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareClient> createShareWithResponse(String shareName, ShareCreateOptions options,
    Duration timeout, Context context) {
    // Same pattern as the metadata/quota overload: create via a share-scoped client and
    // return that client alongside the service response.
    ShareClient client = getShareClient(shareName);
    Response<?> created = client.createWithResponse(options, timeout, context);
    return new SimpleResponse<>(created, client);
}
/**
* Deletes the share in the storage account with the given name
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the share "test"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.deleteShare
* <pre>
* fileServiceClient.deleteShare&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.deleteShare
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @throws ShareStorageException If the share doesn't exist
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void deleteShare(String shareName) {
    // Target the base share (no snapshot) with default timeout/context; the response
    // is intentionally discarded.
    deleteShareWithResponse(shareName, null, null, Context.NONE);
}
/**
* Deletes the specific snapshot of the share in the storage account with the given name. Snapshot are identified by
* the time they were created.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the snapshot of share "test" that was created at current time. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.deleteShareWithResponse
* <pre>
* OffsetDateTime midnight = OffsetDateTime.of&
* Response<Void> response = fileServiceClient.deleteShareWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.deleteShareWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param shareName Name of the share
* @param snapshot Identifier of the snapshot
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that only contains headers and response status code
* @throws ShareStorageException If the share doesn't exist or the snapshot doesn't exist
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteShareWithResponse(String shareName, String snapshot, Duration timeout,
    Context context) {
    Context requestContext = (context == null) ? Context.NONE : context;
    // When no snapshot is targeted, the base share is deleted along with all of its
    // snapshots (INCLUDE); a snapshot-specific delete must leave the option unset.
    DeleteSnapshotsOptionType snapshotHandling = CoreUtils.isNullOrEmpty(snapshot)
        ? DeleteSnapshotsOptionType.INCLUDE
        : null;
    Callable<ResponseBase<SharesDeleteHeaders, Void>> call = () -> this.azureFileStorageClient.getShares()
        .deleteWithResponse(shareName, snapshot, null, snapshotHandling, null, requestContext);
    return StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class);
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Simple accessor; the value is fixed at construction time.
    return accountName;
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Expose the pipeline of the underlying generated client.
    return azureFileStorageClient.getHttpPipeline();
}
/**
* Generates an account SAS for the Azure Storage account using the specified {@link AccountSasSignatureValues}.
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link AccountSasSignatureValues} for more information on how to construct an account SAS.</p>
*
* <p><strong>Generating an account SAS</strong></p>
* <p>The snippet below generates an AccountSasSignatureValues object that lasts for two days and gives the user
* read and list access to blob and file shares.</p>
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.generateAccountSas
* <pre>
* AccountSasPermission permissions = new AccountSasPermission&
* .setListPermission&
* .setReadPermission&
* AccountSasResourceType resourceTypes = new AccountSasResourceType&
* AccountSasService services = new AccountSasService&
* OffsetDateTime expiryTime = OffsetDateTime.now&
*
* AccountSasSignatureValues sasValues =
* new AccountSasSignatureValues&
*
* &
* String sas = fileServiceClient.generateAccountSas&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.generateAccountSas
*
* @param accountSasSignatureValues {@link AccountSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateAccountSas(AccountSasSignatureValues accountSasSignatureValues) {
    // Forward to the context-aware overload with an empty context.
    return generateAccountSas(accountSasSignatureValues, Context.NONE);
}
/**
* Generates an account SAS for the Azure Storage account using the specified {@link AccountSasSignatureValues}.
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link AccountSasSignatureValues} for more information on how to construct an account SAS.</p>
*
* <p>The snippet below generates a SAS that lasts for two days and gives the user read and list access to blob
* containers and file shares.</p>
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.generateAccountSas
* <pre>
* AccountSasPermission permissions = new AccountSasPermission&
* .setListPermission&
* .setReadPermission&
* AccountSasResourceType resourceTypes = new AccountSasResourceType&
* AccountSasService services = new AccountSasService&
* OffsetDateTime expiryTime = OffsetDateTime.now&
*
* AccountSasSignatureValues sasValues =
* new AccountSasSignatureValues&
*
* &
* String sas = fileServiceClient.generateAccountSas&
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.generateAccountSas
*
* @param accountSasSignatureValues {@link AccountSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateAccountSas(AccountSasSignatureValues accountSasSignatureValues, Context context) {
    // Signing requires the shared-key credential, which is pulled out of the HTTP pipeline.
    AccountSasImplUtil sasUtil = new AccountSasImplUtil(accountSasSignatureValues, null);
    return sasUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
/**
* Restores a previously deleted share.
* <p>
* If the share associated with provided <code>deletedShareName</code>
* already exists, this call will result in a 409 (conflict).
* </p>
* <p>
* This API is only functional if Share Soft Delete is enabled
* for the storage account associated with the share.
* For more information, see the
* <a href="TBD">Azure Docs</a>.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.undeleteShare
* <pre>
* ListSharesOptions listSharesOptions = new ListSharesOptions&
* listSharesOptions.setIncludeDeleted&
* fileServiceClient.listShares&
* deletedShare -> &
* ShareClient shareClient = fileServiceClient.undeleteShare&
* deletedShare.getName&
* &
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.undeleteShare
*
* <p>For more information, see the
* <a href="https:
*
* @param deletedShareName The name of the previously deleted share.
* @param deletedShareVersion The version of the previously deleted share.
* @return A {@link ShareClient} used
* to interact with the restored share.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareClient undeleteShare(String deletedShareName, String deletedShareVersion) {
    // Restore with default timeout/context and unwrap the client from the response.
    Response<ShareClient> restored =
        undeleteShareWithResponse(deletedShareName, deletedShareVersion, null, Context.NONE);
    return restored.getValue();
}
/**
* Restores a previously deleted share.
* <p>
* If the share associated with provided <code>deletedShareName</code>
* already exists, this call will result in a 409 (conflict).
* </p>
* <p>
* This API is only functional if Share Soft Delete is enabled
* for the storage account associated with the share.
* For more information, see the
* <a href="TBD">Azure Docs</a>.
* </p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareServiceClient.undeleteShareWithResponse
* <pre>
* ListSharesOptions listSharesOptions = new ListSharesOptions&
* listSharesOptions.setIncludeDeleted&
* fileServiceClient.listShares&
* deletedShare -> &
* ShareClient shareClient = fileServiceClient.undeleteShareWithResponse&
* deletedShare.getName&
* &
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareServiceClient.undeleteShareWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param deletedShareName The name of the previously deleted share.
* @param deletedShareVersion The version of the previously deleted share.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} whose {@link Response
* to interact with the restored share.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareClient> undeleteShareWithResponse(String deletedShareName, String deletedShareVersion,
    Duration timeout, Context context) {
    Context requestContext = (context == null) ? Context.NONE : context;
    // The share is restored under its original name, so the same name is passed both as
    // the destination share name and as the deleted-share identifier.
    Callable<ResponseBase<SharesRestoreHeaders, Void>> call = () -> this.azureFileStorageClient.getShares()
        .restoreWithResponse(deletedShareName, null, null, deletedShareName, deletedShareVersion,
            requestContext);
    ResponseBase<SharesRestoreHeaders, Void> response =
        StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class);
    return new SimpleResponse<>(response, getShareClient(deletedShareName));
}
} |
Guard usage of FileChannel.map as it may not work correctly on Windows: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/src/main/java/com/azure/core/util/FluxUtil.java#L282 | public void uploadFromFile(String uploadFilePath, ShareRequestConditions requestConditions) {
List<ShareFileRange> shareFileRanges = ModelHelper.sliceFile(uploadFilePath);
try (FileChannel channel = FileChannel.open(Paths.get(uploadFilePath), StandardOpenOption.READ)) {
shareFileRanges.stream().forEach(range -> {
try {
MappedByteBuffer map = channel.map(READ_ONLY, range.getStart(),
range.getEnd() - range.getStart() + 1);
InputStream inputStream = new MappedByteBufferInputStream(map);
ShareFileUploadRangeOptions shareFileUploadRangeOptions =
new ShareFileUploadRangeOptions(inputStream, range.getEnd() - range.getStart() + 1)
.setRequestConditions(requestConditions)
.setOffset(range.getStart());
uploadRangeWithResponse(shareFileUploadRangeOptions, null, Context.NONE);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
});
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
}
} | MappedByteBuffer map = channel.map(READ_ONLY, range.getStart(), | public void uploadFromFile(String uploadFilePath, ShareRequestConditions requestConditions) {
shareFileAsyncClient.uploadFromFile(uploadFilePath, requestConditions).block();
} | class ShareFileClient {
private final ShareFileAsyncClient shareFileAsyncClient; // async twin; backs stream open/upload paths
private static final ClientLogger LOGGER = new ClientLogger(ShareFileClient.class);
private final AzureFileStorageImpl azureFileStorageClient; // generated service layer
private final String shareName; // required, validated in the constructor
private final String filePath; // required, validated in the constructor
private final String snapshot; // share snapshot id; null when targeting the live share
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
private final String fileUrlString; // URL precomputed in the constructor, returned by getFileUrl()
/**
* Creates a ShareFileClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param filePath Name of the file
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareFileClient(ShareFileAsyncClient shareFileAsyncClient, AzureFileStorageImpl azureFileStorageClient,
    String shareName, String filePath, String snapshot, String accountName, ShareServiceVersion serviceVersion,
    AzureSasCredential sasToken) {
    this.shareFileAsyncClient = shareFileAsyncClient;
    Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    Objects.requireNonNull(filePath, "'filePath' cannot be null.");
    this.shareName = shareName;
    this.filePath = filePath;
    this.snapshot = snapshot;
    this.azureFileStorageClient = azureFileStorageClient;
    this.accountName = accountName;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
    // Precompute the file URL once; a snapshot-scoped client appends the snapshot query.
    String baseUrl = azureFileStorageClient.getUrl() + "/" + shareName + "/" + filePath;
    this.fileUrlString = (snapshot == null) ? baseUrl : baseUrl + "?sharesnapshot=" + snapshot;
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // Account endpoint only, without the share/file path segments.
    return this.azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
public String getFileUrl() {
    // Precomputed in the constructor (includes the snapshot query when applicable).
    return fileUrlString;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
    // Fixed at construction time.
    return serviceVersion;
}
/**
* Opens a file input stream to download the file.
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileInputStream openInputStream() {
    // Full-file read: start at offset 0 with no explicit end.
    return openInputStream(new ShareFileRange(0));
}
/**
* Opens a file input stream to download the specified range of the file.
*
* @param range {@link ShareFileRange}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileInputStream openInputStream(ShareFileRange range) {
    Long end = range.getEnd();
    // A null end means "read to the end of the file"; otherwise convert the inclusive
    // [start, end] range into a byte count.
    Long count = (end == null) ? null : end - range.getStart() + 1;
    return new StorageFileInputStream(shareFileAsyncClient, range.getStart(), count);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it will
* be overwritten.
*
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileOutputStream getFileOutputStream() {
    // Write from the beginning of the file.
    return getFileOutputStream(0);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it will
* be overwritten.
*
* @param offset Starting point of the upload range.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileOutputStream getFileOutputStream(long offset) {
    // The async client backs the returned blocking stream.
    return new StorageFileOutputStream(shareFileAsyncClient, offset);
}
/**
* Creates and opens a {@link SeekableByteChannel} to write data to the file.
* @param options Options for opening the channel.
* @return The opened channel.
*/
public SeekableByteChannel getFileSeekableByteChannelWrite(ShareFileSeekableByteChannelWriteOptions options) {
    Objects.requireNonNull(options, "'options' cannot be null.");
    if (options.isOverwriteMode()) {
        // Overwrite requires an explicit size so the file can be (re)created up front.
        Long fileSize = options.getFileSizeInBytes();
        Objects.requireNonNull(fileSize, "'options.getFileSize()' cannot return null.");
        create(fileSize);
    }
    Long requestedChunkSize = options.getChunkSizeInBytes();
    int chunkSize = (requestedChunkSize == null)
        ? (int) ModelHelper.FILE_MAX_PUT_RANGE_SIZE
        : requestedChunkSize.intValue();
    return new StorageSeekableByteChannel(chunkSize,
        new StorageSeekableByteChannelShareFileWriteBehavior(this, options.getRequestConditions(),
            options.getFileLastWrittenMode()), 0L);
}
/**
* Creates and opens a {@link SeekableByteChannel} to read data from the file.
* @param options Options for opening the channel.
* @return The opened channel.
*/
public SeekableByteChannel getFileSeekableByteChannelRead(ShareFileSeekableByteChannelReadOptions options) {
    // Options are optional here; fall back to defaults when absent.
    ShareRequestConditions conditions = (options == null) ? null : options.getRequestConditions();
    Long requestedChunkSize = (options == null) ? null : options.getChunkSizeInBytes();
    int chunkSize = (requestedChunkSize == null)
        ? (int) ModelHelper.FILE_MAX_PUT_RANGE_SIZE
        : requestedChunkSize.intValue();
    return new StorageSeekableByteChannel(chunkSize,
        new StorageSeekableByteChannelShareFileReadBehavior(this, conditions), 0L);
}
/**
* Determines if the file this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.exists -->
*
* @return Flag indicating existence of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Reuse the WithResponse variant; headers and status are discarded.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Determines if the file this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.existsWithResponse
* <pre>
* Context context = new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Flag indicating existence of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    try {
        // If properties can be fetched the file exists; reuse that response's
        // status/headers for the Boolean result.
        Response<ShareFileProperties> properties = getPropertiesWithResponse(timeout, context);
        return new SimpleResponse<>(properties, true);
    } catch (RuntimeException e) {
        // A recognized "does not exist" status is translated into a false result rather
        // than an exception; anything else is logged and rethrown.
        if (!ModelHelper.checkDoesNotExistStatusCode(e)) {
            throw LOGGER.logExceptionAsError(e);
        }
        HttpResponse errorResponse = ((HttpResponseException) e).getResponse();
        return new SimpleResponse<>(errorResponse.getRequest(), errorResponse.getStatusCode(),
            errorResponse.getHeaders(), false);
    }
}
/**
* Creates a file in the storage account and returns a response of {@link ShareFileInfo} to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.create -->
* <pre>
* ShareFileInfo response = fileClient.create&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.create -->
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @return The {@link ShareFileInfo file info}
* @throws ShareStorageException If the file has already existed, the parent directory does not exist or fileName
* is an invalid resource name.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileInfo create(long maxSize) {
    // Create with no headers/SMB properties/permission/metadata and unwrap the value.
    Response<ShareFileInfo> response =
        createWithResponse(maxSize, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.createWithResponse
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* Response<ShareFileInfo> response = fileClient.createWithResponse&
* filePermission, Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.createWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param metadata Optional name-value pairs associated with the file as metadata.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
* @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
* directory is an invalid resource name.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
* @see <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Duration timeout,
    Context context) {
    // Forward to the full overload with no request conditions.
    return createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata, null, timeout,
        context);
}
/**
* Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.createWithResponse
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
*
* ShareRequestConditions requestConditions = new ShareRequestConditions&
*
* Response<ShareFileInfo> response = fileClient.createWithResponse&
* filePermission, Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.createWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param metadata Optional name-value pairs associated with the file as metadata.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
* @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
* directory is an invalid resource name.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
* @see <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    ShareRequestConditions finalRequestConditions = requestConditions == null
        ? new ShareRequestConditions() : requestConditions;
    smbProperties = smbProperties == null ? new FileSmbProperties() : smbProperties;
    // Only one of filePermission / smbProperties.filePermissionKey may be supplied.
    ModelHelper.validateFilePermissionAndKey(filePermission, smbProperties.getFilePermissionKey());
    // NOTE(review): the setters below return the wire-format string (presumably applying
    // the given default when unset — confirm against FileSmbProperties); call order
    // matters, in particular setFilePermission before getFilePermissionKey.
    String finalFilePermission = smbProperties.setFilePermission(filePermission, FileConstants.FILE_PERMISSION_INHERIT);
    String filePermissionKey = smbProperties.getFilePermissionKey();
    String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.FILE_ATTRIBUTES_NONE);
    String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.FILE_TIME_NOW);
    String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.FILE_TIME_NOW);
    String fileChangeTime = smbProperties.getFileChangeTimeString();
    Callable<ResponseBase<FilesCreateHeaders, Void>> operation = () ->
        this.azureFileStorageClient.getFiles().createWithResponse(shareName, filePath, maxSize, fileAttributes,
            null, metadata, finalFilePermission, filePermissionKey, fileCreationTime, fileLastWriteTime,
            fileChangeTime, finalRequestConditions.getLeaseId(),
            httpHeaders, finalContext);
    // Fix: pass ShareStorageException.class so service errors are mapped consistently with
    // every other *WithResponse method in this file (it was omitted here, leaving errors
    // unmapped despite the documented @throws ShareStorageException contract).
    ResponseBase<FilesCreateHeaders, Void> response =
        StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
    return ModelHelper.createFileInfoResponse(response);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
* Collections.singletonMap&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
*
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
* naming rules.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata,
    Duration pollInterval) {
    // Delegate to the options-based overload, carrying only the metadata.
    return this.beginCopy(sourceUrl, new ShareFileCopyOptions().setMetadata(metadata), pollInterval);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* boolean ignoreReadOnly = false; &
* boolean setArchiveAttribute = true; &
* ShareRequestConditions requestConditions = new ShareRequestConditions&
*
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
* PermissionCopyModeType.SOURCE, ignoreReadOnly, setArchiveAttribute,
* Collections.singletonMap&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param filePermissionCopyMode Mode of file permission acquisition.
* @param ignoreReadOnly Whether to copy despite target being read only. (default is false)
* @param setArchiveAttribute Whether the archive attribute is to be set on the target. (default is true)
* @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
* naming rules.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @param destinationRequestConditions {@link ShareRequestConditions}
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, FileSmbProperties smbProperties,
    String filePermission, PermissionCopyModeType filePermissionCopyMode, Boolean ignoreReadOnly,
    Boolean setArchiveAttribute, Map<String, String> metadata, Duration pollInterval,
    ShareRequestConditions destinationRequestConditions) {
    // Repackage the discrete arguments into the options bag and delegate to the canonical overload.
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setMetadata(metadata)
        .setSmbProperties(smbProperties)
        .setFilePermission(filePermission)
        .setPermissionCopyModeType(filePermissionCopyMode)
        .setIgnoreReadOnly(ignoreReadOnly)
        .setArchiveAttribute(setArchiveAttribute)
        .setDestinationRequestConditions(destinationRequestConditions);
    return this.beginCopy(sourceUrl, copyOptions, pollInterval);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* boolean ignoreReadOnly = false; &
* boolean setArchiveAttribute = true; &
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList&
* &
*
* ShareFileCopyOptions options = new ShareFileCopyOptions&
* .setSmbProperties&
* .setFilePermission&
* .setIgnoreReadOnly&
* .setArchiveAttribute&
* .setDestinationRequestConditions&
* .setSmbPropertiesToCopy&
* .setPermissionCopyModeType&
* .setMetadata&
*
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @param options {@link ShareFileCopyOptions}
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, ShareFileCopyOptions options, Duration pollInterval) {
    // Destination lease conditions default to "no conditions" when unset.
    final ShareRequestConditions finalRequestConditions =
        options.getDestinationRequestConditions() == null ? new ShareRequestConditions()
        : options.getDestinationRequestConditions();
    // Captured by the activation lambda so the service-assigned copy id is available later.
    final AtomicReference<String> copyId = new AtomicReference<>();
    // Poll cadence defaults to one second.
    final Duration interval = pollInterval == null ? Duration.ofSeconds(1) : pollInterval;
    FileSmbProperties tempSmbProperties = options.getSmbProperties() == null ? new FileSmbProperties()
        : options.getSmbProperties();
    String filePermissionKey = tempSmbProperties.getFilePermissionKey();
    // Validate the permission / copy-mode combination up front:
    // - SOURCE (or unspecified) mode must not carry an explicit permission or permission key;
    // - OVERRIDE mode requires a valid permission/permission-key combination.
    if (options.getFilePermission() == null || options.getPermissionCopyModeType() == PermissionCopyModeType.SOURCE) {
        if ((options.getFilePermission() != null || filePermissionKey != null)
            && options.getPermissionCopyModeType() != PermissionCopyModeType.OVERRIDE) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("File permission and file permission " +
                "key can not be set when PermissionCopyModeType is source or null"));
        }
    } else if (options.getPermissionCopyModeType() == PermissionCopyModeType.OVERRIDE) {
        try {
            ModelHelper.validateFilePermissionAndKey(options.getFilePermission(),
                tempSmbProperties.getFilePermissionKey());
        } catch (RuntimeException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    }
    CopyableFileSmbPropertiesList list = options.getSmbPropertiesToCopy() == null
        ? new CopyableFileSmbPropertiesList() : options.getSmbPropertiesToCopy();
    // An SMB property may be copied from the source or set explicitly, but not both.
    try {
        ModelHelper.validateCopyFlagAndSmbProperties(options, tempSmbProperties);
    } catch (RuntimeException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
    // For each SMB property, forward either the "copy from source" sentinel or the explicit value.
    String fileAttributes = list.isFileAttributes() ? FileConstants.COPY_SOURCE : NtfsFileAttributes.toString(tempSmbProperties.getNtfsFileAttributes());
    String fileCreationTime = list.isCreatedOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileCreationTime());
    String fileLastWriteTime = list.isLastWrittenOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileLastWriteTime());
    String fileChangedOnTime = list.isChangedOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileChangeTime());
    final CopyFileSmbInfo copyFileSmbInfo = new CopyFileSmbInfo()
        .setFilePermissionCopyMode(options.getPermissionCopyModeType())
        .setFileAttributes(fileAttributes)
        .setFileCreationTime(fileCreationTime)
        .setFileLastWriteTime(fileLastWriteTime)
        .setFileChangeTime(fileChangedOnTime)
        .setIgnoreReadOnly(options.isIgnoreReadOnly())
        .setSetArchiveAttribute(options.isArchiveAttributeSet());
    final String copySource = Utility.encodeUrlPath(sourceUrl);
    // Activation: issue Start Copy and surface the service's copy id/status as the first poll result.
    Function<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>> syncActivationOperation =
        (pollingContext) -> {
            ResponseBase<FilesStartCopyHeaders, Void> response = azureFileStorageClient.getFiles()
                .startCopyWithResponse(shareName, filePath, copySource, null,
                    options.getMetadata(), options.getFilePermission(), tempSmbProperties.getFilePermissionKey(),
                    finalRequestConditions.getLeaseId(), copyFileSmbInfo, null);
            FilesStartCopyHeaders headers = response.getDeserializedHeaders();
            copyId.set(headers.getXMsCopyId());
            return new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, new ShareFileCopyInfo(
                sourceUrl,
                headers.getXMsCopyId(),
                headers.getXMsCopyStatus(),
                headers.getETag(),
                headers.getLastModified(),
                response.getHeaders().getValue(HttpHeaderName.fromString("x-ms-error-code"))));
        };
    // Poll: re-read the file's properties and map the copy status onto the poller's status model.
    Function<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>> pollOperation = (pollingContext) ->
        onPoll(pollingContext.getLatestResponse(), finalRequestConditions);
    // Cancel: abort the pending copy using the id captured at activation time.
    BiFunction<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>, ShareFileCopyInfo> cancelOperation =
        (pollingContext, firstResponse) -> {
            if (firstResponse == null || firstResponse.getValue() == null) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Cannot cancel a poll response that never started."));
            }
            final String copyIdentifier = firstResponse.getValue().getCopyId();
            if (!CoreUtils.isNullOrEmpty(copyIdentifier)) {
                LOGGER.info("Cancelling copy operation for copy id: {}", copyIdentifier);
                abortCopyWithResponse(copyIdentifier, finalRequestConditions, null, null);
                return firstResponse.getValue();
            }
            return null;
        };
    // Copy File produces no final result payload.
    Function<PollingContext<ShareFileCopyInfo>, Void> fetchResultOperation = (pollingContext) -> null;
    return SyncPoller.createPoller(interval, syncActivationOperation, pollOperation, cancelOperation, fetchResultOperation);
}
private PollResponse<ShareFileCopyInfo> onPoll(PollResponse<ShareFileCopyInfo> pollResponse,
    ShareRequestConditions requestConditions) {
    // Terminal states are passed through untouched.
    LongRunningOperationStatus currentStatus = pollResponse.getStatus();
    if (currentStatus == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
        || currentStatus == LongRunningOperationStatus.FAILED) {
        return pollResponse;
    }
    final ShareFileCopyInfo previousInfo = pollResponse.getValue();
    if (previousInfo == null) {
        LOGGER.warning("ShareFileCopyInfo does not exist. Activation operation failed.");
        return new PollResponse<>(LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null);
    }
    try {
        // Re-read the destination file's properties to learn the current copy status.
        ShareFileProperties properties = getPropertiesWithResponse(requestConditions, null, null).getValue();
        final CopyStatusType copyStatus = properties.getCopyStatus();
        final ShareFileCopyInfo info = new ShareFileCopyInfo(properties.getCopySource(), properties.getCopyId(),
            copyStatus, properties.getETag(), properties.getCopyCompletionTime(),
            properties.getCopyStatusDescription());
        return new PollResponse<>(ModelHelper.mapStatusToLongRunningOperationStatus(copyStatus), info);
    } catch (RuntimeException e) {
        // Keep the last known info alive while reporting the poll failure.
        return new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), previousInfo);
    }
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopy
* <pre>
* fileClient.abortCopy&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopy
*
* <p>For more information, see the
* <a href="https:
*
 * @param copyId Specifies the copy id which has copying pending status associated with it.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopy(String copyId) {
    // Fire-and-forget variant: discard the response, use default timeout and context.
    this.abortCopyWithResponse(copyId, null, Context.NONE);
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
* <pre>
* Response<Void> response = fileClient.abortCopyWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
*
* <p>For more information, see the
* <a href="https:
*
 * @param copyId Specifies the copy id which has copying pending status associated with it.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the status of aborting copy the file.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyWithResponse(String copyId, Duration timeout, Context context) {
    // Delegate with no lease-based request conditions.
    return abortCopyWithResponse(copyId, null, timeout, context);
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Void> response = fileClient.abortCopyWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
*
* <p>For more information, see the
* <a href="https:
*
 * @param copyId Specifies the copy id which has copying pending status associated with it.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the status of aborting copy the file.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyWithResponse(String copyId, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Normalize optional arguments before handing off to the generated client.
    Context ctx = context == null ? Context.NONE : context;
    ShareRequestConditions conditions =
        requestConditions == null ? new ShareRequestConditions() : requestConditions;
    Callable<ResponseBase<FilesAbortCopyHeaders, Void>> call = () -> this.azureFileStorageClient.getFiles()
        .abortCopyWithResponse(shareName, filePath, copyId, null, conditions.getLeaseId(), ctx);
    return StorageImplUtils.sendRequest(call, timeout);
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFile
* <pre>
* fileClient.downloadToFile&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @return The properties of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileProperties downloadToFile(String downloadFilePath) {
    // Download the whole file with defaults and unwrap the properties from the response.
    return this.downloadToFileWithResponse(downloadFilePath, null, null, Context.NONE).getValue();
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
* <pre>
* Response<ShareFileProperties> response =
* fileClient.downloadToFileWithResponse&
* Duration.ofSeconds&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @param range Optional byte range which returns file data only from the specified range.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The response of the file properties.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
    Duration timeout, Context context) {
    // Delegate with no request conditions.
    return downloadToFileWithResponse(downloadFilePath, range, null, timeout, context);
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileProperties> response =
* fileClient.downloadToFileWithResponse&
* requestConditions, Duration.ofSeconds&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @param range Optional byte range which returns file data only from the specified range.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The response of the file properties.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Bridge to the async client and block, honoring the optional timeout.
    Mono<Response<ShareFileProperties>> download = shareFileAsyncClient
        .downloadToFileWithResponse(downloadFilePath, range, requestConditions, context);
    return StorageImplUtils.blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file with its metadata and properties. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.download
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* fileClient.download&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @throws NullPointerException If {@code stream} is {@code null}.
*/
public void download(OutputStream stream) {
    // Download the entire file with default options, ignoring the returned response.
    this.downloadWithResponse(stream, null, null, null, Context.NONE);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadWithResponse
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* Response<Void> response = fileClient.downloadWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @param range Optional byte range which returns file data only from the specified range.
* @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
* true, as long as the range is less than or equal to 4 MB in size.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the headers and response status code
* @throws NullPointerException If {@code stream} is {@code null}.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
    Boolean rangeGetContentMD5, Duration timeout, Context context) {
    // Delegate with no request conditions.
    return downloadWithResponse(stream, range, rangeGetContentMD5, null, timeout, context);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadWithResponse
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Void> response = fileClient.downloadWithResponse&
* requestConditions, Duration.ofSeconds&
*
* System.out.printf&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @param range Optional byte range which returns file data only from the specified range.
* @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
* true, as long as the range is less than or equal to 4 MB in size.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the headers and response status code
* @throws NullPointerException If {@code stream} is {@code null}.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
    Boolean rangeGetContentMD5, ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Repackage the discrete arguments into the options bag and delegate.
    ShareFileDownloadOptions downloadOptions = new ShareFileDownloadOptions()
        .setRange(range)
        .setRangeContentMd5Requested(rangeGetContentMD5)
        .setRequestConditions(requestConditions);
    return this.downloadWithResponse(stream, downloadOptions, timeout, context);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadWithResponse
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* ShareFileRange range = new ShareFileRange&
* DownloadRetryOptions retryOptions = new DownloadRetryOptions&
* ShareFileDownloadOptions options = new ShareFileDownloadOptions&
* .setRequestConditions&
* .setRangeContentMd5Requested&
* .setRetryOptions&
* Response<Void> response = fileClient.downloadWithResponse&
* new Context&
*
* System.out.printf&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @param options {@link ShareFileDownloadOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the headers and response status code
* @throws NullPointerException If {@code stream} is {@code null}.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileDownloadOptions options,
    Duration timeout, Context context) {
    // Fail fast on a null sink before issuing the request.
    Objects.requireNonNull(stream, "'stream' cannot be null.");
    // Stream the async download into the caller's OutputStream, then surface the response metadata.
    Mono<ShareFileDownloadResponse> operation = shareFileAsyncClient.downloadWithResponse(options, context)
        .flatMap(asyncResponse -> FluxUtil.writeToOutputStream(asyncResponse.getValue(), stream)
            .thenReturn(new ShareFileDownloadResponse(asyncResponse)));
    return StorageImplUtils.blockWithOptionalTimeout(operation, timeout);
}
/**
* Deletes the file associate with the client.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.delete -->
* <pre>
* fileClient.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.delete -->
*
* <p>For more information, see the
* <a href="https:
*
* @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delete with defaults; the response is not needed.
    this.deleteWithResponse(null, Context.NONE);
}
/**
* Deletes the file associate with the client.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.deleteWithResponse
* <pre>
* Response<Void> response = fileClient.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that only contains headers and response status code
* @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(Duration timeout, Context context) {
    // Delegate with no request conditions.
    return deleteWithResponse(null, timeout, context);
}
/**
* Deletes the file associate with the client.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.deleteWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Void> response = fileClient.deleteWithResponse&
* new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that only contains headers and response status code
* @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(ShareRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Normalize optional arguments before handing off to the generated client.
    Context ctx = context == null ? Context.NONE : context;
    ShareRequestConditions conditions =
        requestConditions == null ? new ShareRequestConditions() : requestConditions;
    Callable<ResponseBase<FilesDeleteHeaders, Void>> call = () -> this.azureFileStorageClient.getFiles()
        .deleteWithResponse(shareName, filePath, null, conditions.getLeaseId(), ctx);
    return StorageImplUtils.sendRequest(call, timeout);
}
/**
* Deletes the file associate with the client if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.deleteIfExists -->
* <pre>
* boolean result = fileClient.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.deleteIfExists -->
*
* <p>For more information, see the
* <a href="https:
* @return {@code true} if the file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
return deleteIfExistsWithResponse(null, null, Context.NONE).getValue();
}
/**
* Deletes the file associate with the client if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.deleteIfExistsWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Boolean> response = fileClient.deleteIfExistsWithResponse&
* new Context&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.deleteIfExistsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 202, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(ShareRequestConditions requestConditions, Duration timeout,
Context context) {
try {
Response<Void> response = this.deleteWithResponse(requestConditions, timeout, context);
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), true);
} catch (ShareStorageException e) {
if (e.getStatusCode() == 404 && e.getErrorCode().equals(ShareErrorCode.RESOURCE_NOT_FOUND)) {
HttpResponse res = e.getResponse();
return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
}
/**
* Retrieves the properties of the storage account's file. The properties include file metadata, last modified
* date, is server encrypted, and eTag.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve file properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getProperties -->
* <pre>
* ShareFileProperties properties = fileClient.getProperties&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link ShareFileProperties Storage file properties}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileProperties getProperties() {
return getPropertiesWithResponse(null, Context.NONE).getValue();
}
/**
* Retrieves the properties of the storage account's file. The properties include file metadata, last modified
* date, is server encrypted, and eTag.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve file properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse
* <pre>
* Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> getPropertiesWithResponse(Duration timeout, Context context) {
return this.getPropertiesWithResponse(null, timeout, context);
}
/**
* Retrieves the properties of the storage account's file. The properties include file metadata, last modified
* date, is server encrypted, and eTag.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve file properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> getPropertiesWithResponse(ShareRequestConditions requestConditions,
Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareRequestConditions finalRequestConditions = requestConditions == null ? new ShareRequestConditions() : requestConditions;
Callable<ResponseBase<FilesGetPropertiesHeaders, Void>> operation = () ->
this.azureFileStorageClient.getFiles().getPropertiesWithResponse(shareName, filePath, snapshot,
null, finalRequestConditions.getLeaseId(), finalContext);
ResponseBase<FilesGetPropertiesHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
return ModelHelper.getPropertiesResponse(response);
}
/**
* Sets the user-defined httpHeaders to associate to the file.
*
* <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the httpHeaders of contentType of "text/plain"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setProperties
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* fileClient.setProperties&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setProperties
*
* <p>Clear the httpHeaders of the file and preserve the SMB properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setProperties
* <pre>
* ShareFileInfo response = fileClient.setProperties&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setProperties
*
* <p>For more information, see the
* <a href="https:
*
* @param newFileSize New file size of the file
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file
* @return The {@link ShareFileInfo file info}
* @throws IllegalArgumentException thrown if parameters fail the validation.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHeaders,
FileSmbProperties smbProperties, String filePermission) {
return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, Context.NONE)
.getValue();
}
/**
* Sets the user-defined httpHeaders to associate to the file.
*
* <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the httpHeaders of contentType of "text/plain"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse&
* filePermission, Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>Clear the httpHeaders of the file and preserve the SMB properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param newFileSize New file size of the file
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileInfo file info} with headers and status code
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders,
FileSmbProperties smbProperties, String filePermission, Duration timeout, Context context) {
return this.setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null,
timeout, context);
}
/**
* Sets the user-defined httpHeaders to associate to the file.
*
* <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the httpHeaders of contentType of "text/plain"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* fileClient.setPropertiesWithResponse&
* null&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>Clear the httpHeaders of the file and preserve the SMB properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param newFileSize New file size of the file
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileInfo file info} with headers and status code
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders,
FileSmbProperties smbProperties, String filePermission, ShareRequestConditions requestConditions,
Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareRequestConditions finalRequestConditions = requestConditions == null ? new ShareRequestConditions() : requestConditions;
smbProperties = smbProperties == null ? new FileSmbProperties() : smbProperties;
ModelHelper.validateFilePermissionAndKey(filePermission, smbProperties.getFilePermissionKey());
String finalFilePermission = smbProperties.setFilePermission(filePermission, FileConstants.PRESERVE);
String filePermissionKey = smbProperties.getFilePermissionKey();
String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.PRESERVE);
String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.PRESERVE);
String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.PRESERVE);
String fileChangeTime = smbProperties.getFileChangeTimeString();
Callable<ResponseBase<FilesSetHttpHeadersHeaders, Void>> operation = () ->
this.azureFileStorageClient.getFiles().setHttpHeadersWithResponse(shareName, filePath, fileAttributes, null,
newFileSize, finalFilePermission, filePermissionKey, fileCreationTime, fileLastWriteTime,
fileChangeTime, finalRequestConditions.getLeaseId(), httpHeaders, finalContext);
ResponseBase<FilesSetHttpHeadersHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
return ModelHelper.setPropertiesResponse(response);
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadata
* <pre>
* fileClient.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadata
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadata
* <pre>
* fileClient.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @return The {@link ShareFileMetadataInfo file meta info}
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileMetadataInfo setMetadata(Map<String, String> metadata) {
return setMetadataWithResponse(metadata, null, Context.NONE).getValue();
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Collections.singletonMap&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata, Duration timeout,
Context context) {
return this.setMetadataWithResponse(metadata, null, timeout, context);
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata,
ShareRequestConditions requestConditions, Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareRequestConditions finalRequestConditions = requestConditions == null ? new ShareRequestConditions() : requestConditions;
Callable<ResponseBase<FilesSetMetadataHeaders, Void>> operation = () ->
this.azureFileStorageClient.getFiles().setMetadataWithResponse(shareName, filePath, null, metadata,
finalRequestConditions.getLeaseId(), finalContext);
ResponseBase<FilesSetMetadataHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
return ModelHelper.setMetadataResponse(response);
}
/**
* Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.upload
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = fileClient.upload&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.upload
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*
* @deprecated Use {@link ShareFileClient
* {@link ShareFileClient
* large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo upload(InputStream data, long length) {
return uploadWithResponse(data, length, 0L, null, Context.NONE).getValue();
}
/**
* Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" starting from 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*
* @deprecated Use {@link ShareFileClient
* instead. Or consider {@link ShareFileClient
* an upload that can handle large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset,
Duration timeout, Context context) {
return this.uploadWithResponse(data, length, offset, null, timeout, context);
}
/**
* Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" starting from 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse&
* requestConditions, Duration.ofSeconds&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*
* @deprecated Use {@link ShareFileClient
* instead. Or consider {@link ShareFileClient
* an upload that can handle large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset,
ShareRequestConditions requestConditions, Duration timeout, Context context) {
return this.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(data, length).setOffset(offset).setRequestConditions(requestConditions),
timeout, context);
}
/**
* Buffers a range of bytes and uploads sub-ranges in parallel to a file in storage file service. Upload operations
* perform an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.upload
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = shareFileClient.upload&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.upload
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param transferOptions {@link ParallelTransferOptions} for file transfer.
* @return The {@link ShareFileUploadInfo file upload info}
*/
public ShareFileUploadInfo upload(InputStream data, long length, ParallelTransferOptions transferOptions) {
return uploadWithResponse(new ShareFileUploadOptions(data, length).setParallelTransferOptions(transferOptions),
null, Context.NONE).getValue();
}
/**
* Buffers a range of bytes and uploads sub-ranges in parallel to a file in storage file service. Upload operations
* perform an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = shareFileAsyncClient.uploadWithResponse&
* new ShareFileUploadOptions&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options Argument collection for the upload operation.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link ShareFileUploadInfo file upload info}
*/
public Response<ShareFileUploadInfo> uploadWithResponse(ShareFileUploadOptions options,
Duration timeout, Context context) {
return StorageImplUtils.blockWithOptionalTimeout(
shareFileAsyncClient.uploadWithResponse(options, context), timeout);
}
/**
* Uploads a range of bytes to the specified offset of a file in storage file service. Upload operations perform an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRange
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = shareFileClient.uploadRange&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRange
*
* <p>This method does a single Put Range operation. For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*/
public ShareFileUploadInfo uploadRange(InputStream data, long length) {
return this.uploadRangeWithResponse(new ShareFileUploadRangeOptions(data, length), null, Context.NONE).getValue();
}
/**
 * Uploads a range of bytes to the specified offset of a file in the storage file service. Upload operations
 * perform an in-place write on the specified file.
 *
 * <p>This method does a single Put Range operation.</p>
 *
 * @param options Argument collection for the upload operation.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The {@link ShareFileUploadInfo file upload info}
 * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
 * status code 413 (Request Entity Too Large)
 */
public Response<ShareFileUploadInfo> uploadRangeWithResponse(ShareFileUploadRangeOptions options,
    Duration timeout, Context context) {
    // One-shot upload range.
    ShareRequestConditions conditions = (options.getRequestConditions() != null)
        ? options.getRequestConditions() : new ShareRequestConditions();
    long startOffset = (options.getOffset() != null) ? options.getOffset() : 0L;
    ShareFileRange uploadRange = new ShareFileRange(startOffset, startOffset + options.getLength() - 1);
    Context finalContext = (context != null) ? context : Context.NONE;
    // Prefer the stream when supplied; otherwise materialize the Flux into BinaryData.
    BinaryData body = (options.getDataStream() != null)
        ? BinaryData.fromStream(options.getDataStream())
        : BinaryData.fromFlux(options.getDataFlux(), options.getLength()).block();
    Callable<ResponseBase<FilesUploadRangeHeaders, Void>> call = () -> azureFileStorageClient.getFiles()
        .uploadRangeWithResponse(shareName, filePath, uploadRange.toString(), ShareFileRangeWriteType.UPDATE,
            options.getLength(), null, null, conditions.getLeaseId(), options.getLastWrittenMode(),
            body, finalContext);
    return ModelHelper.uploadRangeHeadersToShareFileInfo(StorageImplUtils.sendRequest(call, timeout));
}
/**
 * Uploads a range of bytes from one file to another file.
 *
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param destinationOffset Starting point of the upload range on the destination.
 * @param sourceOffset Starting point of the upload range on the source.
 * @param sourceUrl Specifies the URL of the source file.
 * @return The {@link ShareFileUploadRangeFromUrlInfo file upload range from url info}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long destinationOffset, long sourceOffset,
    String sourceUrl) {
    // Delegate to the Response-returning overload and unwrap the value.
    Response<ShareFileUploadRangeFromUrlInfo> response = uploadRangeFromUrlWithResponse(length, destinationOffset,
        sourceOffset, sourceUrl, null, Context.NONE);
    return response.getValue();
}
/**
 * Uploads a range of bytes from one file to another file.
 *
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param destinationOffset Starting point of the upload range on the destination.
 * @param sourceOffset Starting point of the upload range on the source.
 * @param sourceUrl Specifies the URL of the source file.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
 * headers and response status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset,
    long sourceOffset, String sourceUrl, Duration timeout, Context context) {
    // No destination request conditions for this overload.
    return uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, timeout,
        context);
}
/**
 * Uploads a range of bytes from one file to another file.
 *
 * @param length Specifies the number of bytes being transmitted in the request body.
 * @param destinationOffset Starting point of the upload range on the destination.
 * @param sourceOffset Starting point of the upload range on the source.
 * @param sourceUrl Specifies the URL of the source file.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
 * headers and response status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset,
    long sourceOffset, String sourceUrl, ShareRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Bundle the individual arguments into the options type and delegate.
    ShareFileUploadRangeFromUrlOptions options = new ShareFileUploadRangeFromUrlOptions(length, sourceUrl)
        .setDestinationOffset(destinationOffset)
        .setSourceOffset(sourceOffset)
        .setDestinationRequestConditions(requestConditions);
    return uploadRangeFromUrlWithResponse(options, timeout, context);
}
/**
 * Uploads a range of bytes from one file to another file.
 *
 * @param options argument collection
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
 * headers and response status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(
    ShareFileUploadRangeFromUrlOptions options, Duration timeout, Context context) {
    ShareRequestConditions destinationConditions = (options.getDestinationRequestConditions() != null)
        ? options.getDestinationRequestConditions() : new ShareRequestConditions();
    // Destination and source ranges share the same length but may start at different offsets.
    ShareFileRange destRange = new ShareFileRange(options.getDestinationOffset(),
        options.getDestinationOffset() + options.getLength() - 1);
    ShareFileRange srcRange = new ShareFileRange(options.getSourceOffset(),
        options.getSourceOffset() + options.getLength() - 1);
    Context finalContext = (context != null) ? context : Context.NONE;
    String sourceAuth = (options.getSourceAuthorization() != null)
        ? options.getSourceAuthorization().toString() : null;
    String copySource = Utility.encodeUrlPath(options.getSourceUrl());
    Callable<ResponseBase<FilesUploadRangeFromURLHeaders, Void>> call = () ->
        this.azureFileStorageClient.getFiles()
            .uploadRangeFromURLWithResponse(shareName, filePath, destRange.toString(), copySource, 0,
                null, srcRange.toString(), null, destinationConditions.getLeaseId(), sourceAuth,
                options.getLastWrittenMode(), null, finalContext);
    return ModelHelper.mapUploadRangeFromUrlResponse(StorageImplUtils.sendRequest(call, timeout));
}
/**
 * Clears a range of bytes starting at the beginning of a file in the storage file service. Clear operations
 * perform an in-place write on the specified file.
 *
 * @param length Specifies the number of bytes being cleared.
 * @return The {@link ShareFileUploadInfo file upload info}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo clearRange(long length) {
    // Clear from offset zero with no request conditions.
    return clearRangeWithResponse(length, 0L, null, Context.NONE).getValue();
}
/**
 * Clears a range of bytes at the specified offset of a file in the storage file service. Clear operations
 * perform an in-place write on the specified file.
 *
 * @param length Specifies the number of bytes being cleared.
 * @param offset Starting point of the cleared range, if {@code null} it will start from the beginning.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
 * status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset, Duration timeout,
    Context context) {
    // No request conditions for this overload.
    return clearRangeWithResponse(length, offset, null, timeout, context);
}
/**
 * Clears a range of bytes at the specified offset of a file in the storage file service. Clear operations
 * perform an in-place write on the specified file.
 *
 * @param length Specifies the number of bytes being cleared.
 * @param offset Starting point of the cleared range, if {@code null} it will start from the beginning.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
 * status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    ShareRequestConditions conditions = (requestConditions != null)
        ? requestConditions : new ShareRequestConditions();
    ShareFileRange clearRange = new ShareFileRange(offset, offset + length - 1);
    Context finalContext = (context != null) ? context : Context.NONE;
    // A CLEAR write sends no body, hence the zero content length and null data.
    Callable<ResponseBase<FilesUploadRangeHeaders, Void>> call = () ->
        this.azureFileStorageClient.getFiles().uploadRangeWithResponse(shareName, filePath,
            clearRange.toString(), ShareFileRangeWriteType.CLEAR, 0L, null, null, conditions.getLeaseId(),
            null, null, finalContext);
    return ModelHelper.transformUploadResponse(StorageImplUtils.sendRequest(call, timeout));
}
/**
 * Uploads a file to the storage file service from the given local path.
 *
 * @param uploadFilePath The path of the local source file to upload
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String uploadFilePath) {
    // No request conditions for this overload.
    uploadFromFile(uploadFilePath, null);
}
/**
* Uploads file to storage file service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload the file from the source file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadFromFile
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* fileClient.uploadFromFile&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadFromFile
*
* <p>For more information, see the
* <a href="https:
* and
* <a href="https:
*
* @param uploadFilePath The path where store the source file to upload
* @param requestConditions {@link ShareRequestConditions}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
 * List of valid ranges for a file.
 *
 * @return {@link ShareFileRange ranges} in the file.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges() {
    // No range filter, no timeout, no context.
    return this.listRanges((ShareFileRange) null, null, null);
}
/**
 * List of valid ranges for a file.
 *
 * @param range Optional byte range which returns file data only from the specified range.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return {@link ShareFileRange ranges} in the file that satisfy the requirements
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, Duration timeout, Context context) {
    // No request conditions for this overload.
    return listRanges(range, null, timeout, context);
}
/**
 * List of valid ranges for a file.
 *
 * @param range Optional byte range which returns file data only from the specified range.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return {@link ShareFileRange ranges} in the file that satisfy the requirements
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    ShareRequestConditions finalRequestConditions = requestConditions == null
        ? new ShareRequestConditions() : requestConditions;
    String rangeString = range == null ? null : range.toString();
    try {
        Supplier<ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList>> operation = () ->
            this.azureFileStorageClient.getFiles().getRangeListWithResponse(shareName, filePath, snapshot,
                null, null, rangeString, finalRequestConditions.getLeaseId(), null, finalContext);
        // Run on the shared pool only when a timeout needs to be enforced.
        ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList> response = timeout != null
            ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
        List<ShareFileRange> shareFileRangeList =
            response.getValue().getRanges().stream()
                .map(r -> new Range().setStart(r.getStart()).setEnd(r.getEnd()))
                .map(ShareFileRange::new).collect(Collectors.toList());
        Supplier<PagedResponse<ShareFileRange>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(), shareFileRangeList, null,
            response.getDeserializedHeaders());
        return new PagedIterable<>(finalResponse);
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException | TimeoutException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * List of valid ranges for a file, compared against a previous snapshot.
 *
 * @param previousSnapshot Specifies that the response will contain only ranges that were changed between target
 * file and previous snapshot. Changed ranges include both updated and cleared ranges. The target file may be a
 * snapshot, as long as the snapshot specified by previousSnapshot is the older of the two.
 * @return {@link ShareFileRange ranges} in the file that satisfy the requirements
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileRangeList listRangesDiff(String previousSnapshot) {
    // Delegate to the options-based overload with no timeout and an empty context.
    ShareFileListRangesDiffOptions options = new ShareFileListRangesDiffOptions(previousSnapshot);
    return this.listRangesDiffWithResponse(options, null, Context.NONE).getValue();
}
/**
 * List of valid ranges for a file, compared against a previous snapshot.
 *
 * @param options {@link ShareFileListRangesDiffOptions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return {@link ShareFileRange ranges} in the file that satisfy the requirements
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileRangeList> listRangesDiffWithResponse(ShareFileListRangesDiffOptions options,
    Duration timeout, Context context) {
    Context finalContext = (context != null) ? context : Context.NONE;
    ShareRequestConditions conditions = (options.getRequestConditions() != null)
        ? options.getRequestConditions() : new ShareRequestConditions();
    String rangeString = (options.getRange() != null) ? options.getRange().toString() : null;
    Callable<ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList>> call = () ->
        this.azureFileStorageClient.getFiles().getRangeListWithResponse(shareName, filePath, snapshot,
            options.getPreviousSnapshot(), null, rangeString, conditions.getLeaseId(), null, finalContext);
    return StorageImplUtils.sendRequest(call, timeout);
}
/**
 * List of open handles on a file.
 *
 * @return {@link HandleItem handles} in the file that satisfy the requirements
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<HandleItem> listHandles() {
    // No page-size limit, no timeout.
    return this.listHandles(null, null, Context.NONE);
}
/**
 * List of open handles on a file.
 *
 * @param maxResultsPerPage Optional max number of results returned per page
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return {@link HandleItem handles} in the file that satisfy the requirements
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<HandleItem> listHandles(Integer maxResultsPerPage, Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    try {
        Supplier<ResponseBase<FilesListHandlesHeaders, ListHandlesResponse>> operation = () ->
            this.azureFileStorageClient.getFiles().listHandlesWithResponse(shareName, filePath, null,
                maxResultsPerPage, null, snapshot, finalContext);
        // Run on the shared pool only when a timeout needs to be enforced.
        ResponseBase<FilesListHandlesHeaders, ListHandlesResponse> response = timeout != null
            ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
        Supplier<PagedResponse<HandleItem>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
            response.getStatusCode(),
            response.getHeaders(),
            ModelHelper.transformHandleItems(response.getValue().getHandleList()),
            null,
            response.getDeserializedHeaders());
        return new PagedIterable<>(finalResponse);
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException | TimeoutException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Closes a handle on the file at the service. This is intended to be used alongside listing handles.
 *
 * @param handleId Handle ID to be closed.
 * @return Information about the closed handles.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public CloseHandlesInfo forceCloseHandle(String handleId) {
    // Delegate and unwrap the response value.
    return forceCloseHandleWithResponse(handleId, null, Context.NONE).getValue();
}
/**
 * Closes a handle on the file at the service. This is intended to be used alongside listing handles.
 *
 * @param handleId Handle ID to be closed.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response that contains information about the closed handles, headers and response status code.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<CloseHandlesInfo> forceCloseHandleWithResponse(String handleId, Duration timeout, Context context) {
    Context finalContext = (context != null) ? context : Context.NONE;
    Callable<ResponseBase<FilesForceCloseHandlesHeaders, Void>> call = () ->
        this.azureFileStorageClient.getFiles().forceCloseHandlesWithResponse(shareName, filePath, handleId,
            null, null, snapshot, finalContext);
    ResponseBase<FilesForceCloseHandlesHeaders, Void> response = StorageImplUtils.sendRequest(call, timeout);
    FilesForceCloseHandlesHeaders headers = response.getDeserializedHeaders();
    CloseHandlesInfo closeInfo = new CloseHandlesInfo(headers.getXMsNumberOfHandlesClosed(),
        headers.getXMsNumberOfHandlesFailed());
    return new SimpleResponse<>(response, closeInfo);
}
/**
 * Closes all handles opened on the file at the service.
 *
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Information about the closed handles
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    try {
        // "*" asks the service to close every handle on this file.
        Supplier<ResponseBase<FilesForceCloseHandlesHeaders, Void>> operation = () ->
            this.azureFileStorageClient.getFiles().forceCloseHandlesWithResponse(shareName, filePath, "*", null,
                null, snapshot, finalContext);
        ResponseBase<FilesForceCloseHandlesHeaders, Void> response = timeout != null
            ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
        Supplier<PagedResponse<CloseHandlesInfo>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(),
            Collections.singletonList(new CloseHandlesInfo(
                response.getDeserializedHeaders().getXMsNumberOfHandlesClosed(),
                response.getDeserializedHeaders().getXMsNumberOfHandlesFailed())),
            response.getDeserializedHeaders().getXMsMarker(),
            response.getDeserializedHeaders());
        // Aggregate closed/failed counts across all continuation pages.
        return new PagedIterable<>(finalResponse).stream().reduce(new CloseHandlesInfo(0, 0),
            (accu, next) -> new CloseHandlesInfo(accu.getClosedHandles() + next.getClosedHandles(),
                accu.getFailedHandles() + next.getFailedHandles()));
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException | TimeoutException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Moves the file to another location within the share.
 *
 * @param destinationPath Relative path from the share to rename the file to.
 * @return A {@link ShareFileClient} used to interact with the new file created.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileClient rename(String destinationPath) {
    // Delegate to the options-based overload and unwrap the new client.
    ShareFileRenameOptions options = new ShareFileRenameOptions(destinationPath);
    return renameWithResponse(options, null, Context.NONE).getValue();
}
/**
 * Moves the file to another location within the share.
 * <p>For more information see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/rename-file">Azure Docs</a>.</p>
 *
 * @param options {@link ShareFileRenameOptions} describing the destination path, request conditions, SMB
 * properties, permission and metadata for the rename.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} whose value is a {@link ShareFileClient} used to interact with the renamed file.
 * @throws NullPointerException If {@code options} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileClient> renameWithResponse(ShareFileRenameOptions options, Duration timeout,
Context context) {
StorageImplUtils.assertNotNull("options", options);
Context finalContext = context == null ? Context.NONE : context;
// Substitute empty conditions when the caller supplied none so the lease-id getters below just return null.
ShareRequestConditions sourceRequestConditions = options.getSourceRequestConditions() == null
? new ShareRequestConditions() : options.getSourceRequestConditions();
ShareRequestConditions destinationRequestConditions = options.getDestinationRequestConditions() == null
? new ShareRequestConditions() : options.getDestinationRequestConditions();
// The generated layer takes separate lease-access wrappers for source and destination.
SourceLeaseAccessConditions sourceConditions = new SourceLeaseAccessConditions()
.setSourceLeaseId(sourceRequestConditions.getLeaseId());
DestinationLeaseAccessConditions destinationConditions = new DestinationLeaseAccessConditions()
.setDestinationLeaseId(destinationRequestConditions.getLeaseId());
// SMB info headers are only sent when SMB properties were provided; otherwise both stay null.
CopyFileSmbInfo smbInfo;
String filePermissionKey;
if (options.getSmbProperties() != null) {
FileSmbProperties tempSmbProperties = options.getSmbProperties();
filePermissionKey = tempSmbProperties.getFilePermissionKey();
// Convert the strongly-typed SMB values into the wire (string) representations expected by the service.
String fileAttributes = NtfsFileAttributes.toString(tempSmbProperties.getNtfsFileAttributes());
String fileCreationTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileCreationTime());
String fileLastWriteTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileLastWriteTime());
String fileChangeTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileChangeTime());
smbInfo = new CopyFileSmbInfo()
.setFileAttributes(fileAttributes)
.setFileCreationTime(fileCreationTime)
.setFileLastWriteTime(fileLastWriteTime)
.setFileChangeTime(fileChangeTime)
.setIgnoreReadOnly(options.isIgnoreReadOnly());
} else {
smbInfo = null;
filePermissionKey = null;
}
// Client pointing at the destination path; the rename request is issued against the destination.
ShareFileClient destinationFileClient = getFileClient(options.getDestinationPath());
ShareFileHttpHeaders headers = options.getContentType() == null ? null
: new ShareFileHttpHeaders().setContentType(options.getContentType());
// When this client was authenticated with a SAS, propagate the signature on the copy-source URL so the
// service can read the source. NOTE(review): appends only the signature, not the full SAS query — confirm
// this matches what the service expects.
String renameSource = this.sasToken != null
? this.getFileUrl() + "?" + this.sasToken.getSignature() : this.getFileUrl();
Callable<ResponseBase<FilesRenameHeaders, Void>> operation = () ->
destinationFileClient.azureFileStorageClient.getFiles().renameWithResponse(
destinationFileClient.getShareName(), destinationFileClient.getFilePath(), renameSource,
null /* timeout */, options.getReplaceIfExists(), options.isIgnoreReadOnly(),
options.getFilePermission(), filePermissionKey, options.getMetadata(), sourceConditions,
destinationConditions, smbInfo, headers, finalContext);
// sendRequest applies the caller-supplied client-side timeout around the synchronous service call.
ResponseBase<FilesRenameHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
return new SimpleResponse<>(response, destinationFileClient);
}
/**
 * Creates a {@link ShareFileClient} for another file in the same share, reusing this client's pipeline,
 * account, service version and SAS token.
 *
 * @param destinationPath Relative path from the share to the target file.
 * @return A new {@link ShareFileClient} for {@code destinationPath}.
 * @throws IllegalArgumentException If {@code destinationPath} is null or empty.
 */
ShareFileClient getFileClient(String destinationPath) {
    if (CoreUtils.isNullOrEmpty(destinationPath)) {
        // The guard rejects both null and empty, so the message must say so (previously it only mentioned null).
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'destinationPath' cannot be null or empty."));
    }
    return new ShareFileClient(shareFileAsyncClient, this.azureFileStorageClient, getShareName(), destinationPath,
        null, this.getAccountName(), this.getServiceVersion(), this.getSasToken());
}
/**
 * Gets the snapshot id attached to this {@link ShareFileClient}. Returns {@code null} when no snapshot id
 * is attached.
 *
 * @return The snapshot id, a unique {@code DateTime} value that identifies the share snapshot to its base
 * share.
 */
public String getShareSnapshotId() {
    return snapshot;
}
/**
 * Gets the name of the share this file client targets.
 *
 * @return The share name of the file.
 */
public String getShareName() {
    return this.shareName;
}
/**
 * Gets the path of the file this client targets, relative to the share.
 *
 * @return The path of the file.
 */
public String getFilePath() {
    return this.filePath;
}
/**
 * Gets the storage account name associated with this resource.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return accountName;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.azureFileStorageClient.getHttpPipeline();
}
// Package-private accessor for the SAS credential this client was built with (may be null).
AzureSasCredential getSasToken() {
    return this.sasToken;
}
/**
 * Generates a service SAS for the file using the specified {@link ShareServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link ShareServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues}
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatureValues) {
    // Delegate to the context-aware overload with an empty context.
    return this.generateSas(shareServiceSasSignatureValues, Context.NONE);
}
/**
 * Generates a service SAS for the file using the specified {@link ShareServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link ShareServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatureValues, Context context) {
    // Sign with the shared-key credential extracted from this client's pipeline.
    ShareSasImplUtil sasImplUtil =
        new ShareSasImplUtil(shareServiceSasSignatureValues, getShareName(), getFilePath());
    return sasImplUtil.generateSas(SasImplUtils.extractSharedKeyCredential(this.getHttpPipeline()), context);
}
} | class ShareFileClient {
private final ShareFileAsyncClient shareFileAsyncClient;
private static final ClientLogger LOGGER = new ClientLogger(ShareFileClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String filePath;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
private final String fileUrlString;
/**
* Creates a ShareFileClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param filePath Name of the file
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareFileClient(ShareFileAsyncClient shareFileAsyncClient, AzureFileStorageImpl azureFileStorageClient,
String shareName, String filePath, String snapshot, String accountName, ShareServiceVersion serviceVersion,
AzureSasCredential sasToken) {
this.shareFileAsyncClient = shareFileAsyncClient;
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
Objects.requireNonNull(filePath, "'filePath' cannot be null.");
this.shareName = shareName;
this.filePath = filePath;
this.snapshot = snapshot;
this.azureFileStorageClient = azureFileStorageClient;
this.accountName = accountName;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
StringBuilder fileUrlstring = new StringBuilder(azureFileStorageClient.getUrl()).append("/")
.append(shareName).append("/").append(filePath);
if (snapshot != null) {
fileUrlstring.append("?sharesnapshot=").append(snapshot);
}
this.fileUrlString = fileUrlstring.toString();
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
public String getFileUrl() {
return this.fileUrlString;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
return this.serviceVersion;
}
/**
* Opens a file input stream to download the file.
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileInputStream openInputStream() {
return openInputStream(new ShareFileRange(0));
}
/**
* Opens a file input stream to download the specified range of the file.
*
* @param range {@link ShareFileRange}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileInputStream openInputStream(ShareFileRange range) {
return new StorageFileInputStream(shareFileAsyncClient, range.getStart(),
range.getEnd() == null ? null : (range.getEnd() - range.getStart() + 1));
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it will
* be overwritten.
*
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileOutputStream getFileOutputStream() {
return getFileOutputStream(0);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it will
* be overwritten.
*
* @param offset Starting point of the upload range.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileOutputStream getFileOutputStream(long offset) {
return new StorageFileOutputStream(shareFileAsyncClient, offset);
}
/**
* Creates and opens a {@link SeekableByteChannel} to write data to the file.
* @param options Options for opening the channel.
* @return The opened channel.
*/
public SeekableByteChannel getFileSeekableByteChannelWrite(ShareFileSeekableByteChannelWriteOptions options) {
Objects.requireNonNull(options, "'options' cannot be null.");
if (options.isOverwriteMode()) {
Objects.requireNonNull(options.getFileSizeInBytes(), "'options.getFileSize()' cannot return null.");
create(options.getFileSizeInBytes());
}
int chunkSize = options.getChunkSizeInBytes() != null
? options.getChunkSizeInBytes().intValue() : (int) ModelHelper.FILE_MAX_PUT_RANGE_SIZE;
return new StorageSeekableByteChannel(chunkSize,
new StorageSeekableByteChannelShareFileWriteBehavior(this, options.getRequestConditions(),
options.getFileLastWrittenMode()), 0L);
}
/**
* Creates and opens a {@link SeekableByteChannel} to read data from the file.
* @param options Options for opening the channel.
* @return The opened channel.
*/
public SeekableByteChannel getFileSeekableByteChannelRead(ShareFileSeekableByteChannelReadOptions options) {
ShareRequestConditions conditions = options != null ? options.getRequestConditions() : null;
Long configuredChunkSize = options != null ? options.getChunkSizeInBytes() : null;
int chunkSize = configuredChunkSize != null ? configuredChunkSize.intValue() : (int) ModelHelper.FILE_MAX_PUT_RANGE_SIZE;
return new StorageSeekableByteChannel(chunkSize,
new StorageSeekableByteChannelShareFileReadBehavior(this, conditions), 0L);
}
/**
* Determines if the file this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.exists -->
*
* @return Flag indicating existence of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Determines if the file this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.existsWithResponse
* <pre>
* Context context = new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Flag indicating existence of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
try {
Response<ShareFileProperties> response = getPropertiesWithResponse(timeout, context);
return new SimpleResponse<>(response, true);
} catch (RuntimeException e) {
if (ModelHelper.checkDoesNotExistStatusCode(e) && e instanceof HttpResponseException) {
HttpResponse response = ((HttpResponseException) e).getResponse();
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
response.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
}
/**
* Creates a file in the storage account and returns a response of {@link ShareFileInfo} to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.create -->
* <pre>
* ShareFileInfo response = fileClient.create&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.create -->
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @return The {@link ShareFileInfo file info}
* @throws ShareStorageException If the file has already existed, the parent directory does not exist or fileName
* is an invalid resource name.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileInfo create(long maxSize) {
return createWithResponse(maxSize, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.createWithResponse
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* Response<ShareFileInfo> response = fileClient.createWithResponse&
* filePermission, Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.createWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param metadata Optional name-value pairs associated with the file as metadata.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
* @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
* directory is an invalid resource name.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
* @see <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Duration timeout,
Context context) {
return this.createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata, null, timeout,
context);
}
/**
* Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.createWithResponse
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
*
* ShareRequestConditions requestConditions = new ShareRequestConditions&
*
* Response<ShareFileInfo> response = fileClient.createWithResponse&
* filePermission, Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.createWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param metadata Optional name-value pairs associated with the file as metadata.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
* @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
* directory is an invalid resource name.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
* @see <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata,
ShareRequestConditions requestConditions, Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareRequestConditions finalRequestConditions = requestConditions == null
? new ShareRequestConditions() : requestConditions;
smbProperties = smbProperties == null ? new FileSmbProperties() : smbProperties;
ModelHelper.validateFilePermissionAndKey(filePermission, smbProperties.getFilePermissionKey());
String finalFilePermission = smbProperties.setFilePermission(filePermission, FileConstants.FILE_PERMISSION_INHERIT);
String filePermissionKey = smbProperties.getFilePermissionKey();
String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.FILE_ATTRIBUTES_NONE);
String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.FILE_TIME_NOW);
String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.FILE_TIME_NOW);
String fileChangeTime = smbProperties.getFileChangeTimeString();
Callable<ResponseBase<FilesCreateHeaders, Void>> operation = () ->
this.azureFileStorageClient.getFiles().createWithResponse(shareName, filePath, maxSize, fileAttributes,
null, metadata, finalFilePermission, filePermissionKey, fileCreationTime, fileLastWriteTime,
fileChangeTime, finalRequestConditions.getLeaseId(),
httpHeaders, finalContext);
ResponseBase<FilesCreateHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
return ModelHelper.createFileInfoResponse(response);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
* Collections.singletonMap&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
*
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
* naming rules.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata,
Duration pollInterval) {
ShareFileCopyOptions options = new ShareFileCopyOptions().setMetadata(metadata);
return this.beginCopy(sourceUrl, options, pollInterval);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* boolean ignoreReadOnly = false; &
* boolean setArchiveAttribute = true; &
* ShareRequestConditions requestConditions = new ShareRequestConditions&
*
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
* PermissionCopyModeType.SOURCE, ignoreReadOnly, setArchiveAttribute,
* Collections.singletonMap&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param filePermissionCopyMode Mode of file permission acquisition.
* @param ignoreReadOnly Whether to copy despite target being read only. (default is false)
* @param setArchiveAttribute Whether the archive attribute is to be set on the target. (default is true)
* @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
* naming rules.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @param destinationRequestConditions {@link ShareRequestConditions}
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, FileSmbProperties smbProperties,
String filePermission, PermissionCopyModeType filePermissionCopyMode, Boolean ignoreReadOnly,
Boolean setArchiveAttribute, Map<String, String> metadata, Duration pollInterval,
ShareRequestConditions destinationRequestConditions) {
ShareFileCopyOptions options = new ShareFileCopyOptions()
.setSmbProperties(smbProperties)
.setFilePermission(filePermission)
.setPermissionCopyModeType(filePermissionCopyMode)
.setIgnoreReadOnly(ignoreReadOnly)
.setArchiveAttribute(setArchiveAttribute)
.setMetadata(metadata)
.setDestinationRequestConditions(destinationRequestConditions);
return beginCopy(sourceUrl, options, pollInterval);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* boolean ignoreReadOnly = false; &
* boolean setArchiveAttribute = true; &
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList&
* &
*
* ShareFileCopyOptions options = new ShareFileCopyOptions&
* .setSmbProperties&
* .setFilePermission&
* .setIgnoreReadOnly&
* .setArchiveAttribute&
* .setDestinationRequestConditions&
* .setSmbPropertiesToCopy&
* .setPermissionCopyModeType&
* .setMetadata&
*
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @param options {@link ShareFileCopyOptions}
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, ShareFileCopyOptions options, Duration pollInterval) {
final ShareRequestConditions finalRequestConditions =
options.getDestinationRequestConditions() == null ? new ShareRequestConditions()
: options.getDestinationRequestConditions();
final AtomicReference<String> copyId = new AtomicReference<>();
final Duration interval = pollInterval == null ? Duration.ofSeconds(1) : pollInterval;
FileSmbProperties tempSmbProperties = options.getSmbProperties() == null ? new FileSmbProperties()
: options.getSmbProperties();
String filePermissionKey = tempSmbProperties.getFilePermissionKey();
if (options.getFilePermission() == null || options.getPermissionCopyModeType() == PermissionCopyModeType.SOURCE) {
if ((options.getFilePermission() != null || filePermissionKey != null)
&& options.getPermissionCopyModeType() != PermissionCopyModeType.OVERRIDE) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("File permission and file permission key can not be set when PermissionCopyModeType is source or null"));
}
} else if (options.getPermissionCopyModeType() == PermissionCopyModeType.OVERRIDE) {
try {
ModelHelper.validateFilePermissionAndKey(options.getFilePermission(),
tempSmbProperties.getFilePermissionKey());
} catch (RuntimeException ex) {
throw LOGGER.logExceptionAsError(ex);
}
}
CopyableFileSmbPropertiesList list = options.getSmbPropertiesToCopy() == null
? new CopyableFileSmbPropertiesList() : options.getSmbPropertiesToCopy();
try {
ModelHelper.validateCopyFlagAndSmbProperties(options, tempSmbProperties);
} catch (RuntimeException ex) {
throw LOGGER.logExceptionAsError(ex);
}
String fileAttributes = list.isFileAttributes() ? FileConstants.COPY_SOURCE : NtfsFileAttributes.toString(tempSmbProperties.getNtfsFileAttributes());
String fileCreationTime = list.isCreatedOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileCreationTime());
String fileLastWriteTime = list.isLastWrittenOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileLastWriteTime());
String fileChangedOnTime = list.isChangedOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileChangeTime());
final CopyFileSmbInfo copyFileSmbInfo = new CopyFileSmbInfo()
.setFilePermissionCopyMode(options.getPermissionCopyModeType())
.setFileAttributes(fileAttributes)
.setFileCreationTime(fileCreationTime)
.setFileLastWriteTime(fileLastWriteTime)
.setFileChangeTime(fileChangedOnTime)
.setIgnoreReadOnly(options.isIgnoreReadOnly())
.setSetArchiveAttribute(options.isArchiveAttributeSet());
final String copySource = Utility.encodeUrlPath(sourceUrl);
Function<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>> syncActivationOperation =
(pollingContext) -> {
ResponseBase<FilesStartCopyHeaders, Void> response = azureFileStorageClient.getFiles()
.startCopyWithResponse(shareName, filePath, copySource, null,
options.getMetadata(), options.getFilePermission(), tempSmbProperties.getFilePermissionKey(),
finalRequestConditions.getLeaseId(), copyFileSmbInfo, null);
FilesStartCopyHeaders headers = response.getDeserializedHeaders();
copyId.set(headers.getXMsCopyId());
return new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, new ShareFileCopyInfo(
sourceUrl,
headers.getXMsCopyId(),
headers.getXMsCopyStatus(),
headers.getETag(),
headers.getLastModified(),
response.getHeaders().getValue(HttpHeaderName.fromString("x-ms-error-code"))));
};
Function<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>> pollOperation = (pollingContext) ->
onPoll(pollingContext.getLatestResponse(), finalRequestConditions);
BiFunction<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>, ShareFileCopyInfo> cancelOperation =
(pollingContext, firstResponse) -> {
if (firstResponse == null || firstResponse.getValue() == null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("Cannot cancel a poll response that never started."));
}
final String copyIdentifier = firstResponse.getValue().getCopyId();
if (!CoreUtils.isNullOrEmpty(copyIdentifier)) {
LOGGER.info("Cancelling copy operation for copy id: {}", copyIdentifier);
abortCopyWithResponse(copyIdentifier, finalRequestConditions, null, null);
return firstResponse.getValue();
}
return null;
};
Function<PollingContext<ShareFileCopyInfo>, Void> fetchResultOperation = (pollingContext) -> null;
return SyncPoller.createPoller(interval, syncActivationOperation, pollOperation, cancelOperation, fetchResultOperation);
}
/**
 * Poll operation for the copy-file long-running operation: fetches the destination file's properties and maps
 * the service-reported copy status onto a {@link LongRunningOperationStatus}.
 *
 * @param pollResponse The latest poll response from the previous polling cycle.
 * @param requestConditions Request conditions (lease id) applied to the properties call.
 * @return The next {@link PollResponse} describing the state of the copy operation.
 */
private PollResponse<ShareFileCopyInfo> onPoll(PollResponse<ShareFileCopyInfo> pollResponse,
    ShareRequestConditions requestConditions) {
    // Terminal states pass through unchanged so the poller stops.
    if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
        || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) {
        return pollResponse;
    }
    final ShareFileCopyInfo lastInfo = pollResponse.getValue();
    if (lastInfo == null) {
        // Activation never produced copy info; report a terminal custom failure status.
        LOGGER.warning("ShareFileCopyInfo does not exist. Activation operation failed.");
        return new PollResponse<>(LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null);
    }
    try {
        Response<ShareFileProperties> response = getPropertiesWithResponse(requestConditions, null, null);
        ShareFileProperties value = response.getValue();
        final CopyStatusType status = value.getCopyStatus();
        final ShareFileCopyInfo result = new ShareFileCopyInfo(value.getCopySource(), value.getCopyId(),
            status, value.getETag(), value.getCopyCompletionTime(), value.getCopyStatusDescription());
        LongRunningOperationStatus operationStatus = ModelHelper.mapStatusToLongRunningOperationStatus(status);
        return new PollResponse<>(operationStatus, result);
    } catch (RuntimeException e) {
        // Any failure while polling is treated as terminal; the last known copy info is surfaced.
        // NOTE(review): the caught exception is dropped silently — consider logging it. TODO confirm intent.
        return new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo);
    }
}
/**
 * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-file">Azure Docs</a>.</p>
 *
 * @param copyId Specifies the copy id which has copying pending status associate with it.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopy(String copyId) {
    abortCopyWithResponse(copyId, null, Context.NONE);
}
/**
 * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-file">Azure Docs</a>.</p>
 *
 * @param copyId Specifies the copy id which has copying pending status associate with it.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the status of aborting copy the file.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyWithResponse(String copyId, Duration timeout, Context context) {
    // Delegates to the overload that accepts request conditions, with no conditions applied.
    return this.abortCopyWithResponse(copyId, null, timeout, context);
}
/**
 * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/abort-copy-file">Azure Docs</a>.</p>
 *
 * @param copyId Specifies the copy id which has copying pending status associate with it.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the status of aborting copy the file.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyWithResponse(String copyId, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Normalize optional arguments before capturing them in the lambda.
    ShareRequestConditions conditions = (requestConditions != null) ? requestConditions
        : new ShareRequestConditions();
    Context ctx = (context != null) ? context : Context.NONE;
    Callable<ResponseBase<FilesAbortCopyHeaders, Void>> call = () -> azureFileStorageClient.getFiles()
        .abortCopyWithResponse(shareName, filePath, copyId, null, conditions.getLeaseId(), ctx);
    return StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class);
}
/**
 * Downloads a file from the system, including its metadata and properties into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param downloadFilePath The path where store the downloaded file
 * @return The properties of the file.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileProperties downloadToFile(String downloadFilePath) {
    return downloadToFileWithResponse(downloadFilePath, null, null, Context.NONE).getValue();
}
/**
 * Downloads a file from the system, including its metadata and properties into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param downloadFilePath The path where store the downloaded file
 * @param range Optional byte range which returns file data only from the specified range.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The response of the file properties.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
    Duration timeout, Context context) {
    // Delegates to the overload that accepts request conditions, with no conditions applied.
    return this.downloadToFileWithResponse(downloadFilePath, range, null, timeout, context);
}
/**
 * Downloads a file from the system, including its metadata and properties into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param downloadFilePath The path where store the downloaded file
 * @param range Optional byte range which returns file data only from the specified range.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The response of the file properties.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Block on the async client's download, applying the caller's timeout if one was supplied.
    return StorageImplUtils.blockWithOptionalTimeout(
        shareFileAsyncClient.downloadToFileWithResponse(downloadFilePath, range, requestConditions, context),
        timeout);
}
/**
 * Downloads a file from the system, including its metadata and properties.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
 * @throws NullPointerException If {@code stream} is {@code null}.
 */
// Annotation added for consistency: every other public service method in this client carries @ServiceMethod.
@ServiceMethod(returns = ReturnType.SINGLE)
public void download(OutputStream stream) {
    downloadWithResponse(stream, null, null, null, Context.NONE);
}
/**
 * Downloads a file from the system, including its metadata and properties.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
 * @param range Optional byte range which returns file data only from the specified range.
 * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
 * true, as long as the range is less than or equal to 4 MB in size.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the headers and response status code
 * @throws NullPointerException If {@code stream} is {@code null}.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
// Annotation added for consistency: every other public service method in this client carries @ServiceMethod.
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
    Boolean rangeGetContentMD5, Duration timeout, Context context) {
    return this.downloadWithResponse(stream, range, rangeGetContentMD5, null, timeout, context);
}
/**
 * Downloads a file from the system, including its metadata and properties.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
 * @param range Optional byte range which returns file data only from the specified range.
 * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
 * true, as long as the range is less than or equal to 4 MB in size.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the headers and response status code
 * @throws NullPointerException If {@code stream} is {@code null}.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
// Annotation added for consistency: every other public service method in this client carries @ServiceMethod.
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
    Boolean rangeGetContentMD5, ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Bundle the individual parameters into the options overload.
    return downloadWithResponse(stream, new ShareFileDownloadOptions().setRange(range)
        .setRangeContentMd5Requested(rangeGetContentMD5).setRequestConditions(requestConditions), timeout, context);
}
/**
 * Downloads a file from the system, including its metadata and properties.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
 * @param options {@link ShareFileDownloadOptions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the headers and response status code
 * @throws NullPointerException If {@code stream} is {@code null}.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
// Annotation added for consistency: every other public service method in this client carries @ServiceMethod.
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileDownloadOptions options,
    Duration timeout, Context context) {
    Objects.requireNonNull(stream, "'stream' cannot be null.");
    // Stream the async download body into the caller's OutputStream, then surface the wrapped response.
    Mono<ShareFileDownloadResponse> download = shareFileAsyncClient.downloadWithResponse(options, context)
        .flatMap(response -> FluxUtil.writeToOutputStream(response.getValue(), stream)
            .thenReturn(new ShareFileDownloadResponse(response)));
    return StorageImplUtils.blockWithOptionalTimeout(download, timeout);
}
/**
 * Deletes the file associate with the client.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    deleteWithResponse(null, Context.NONE);
}
/**
 * Deletes the file associate with the client.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response that only contains headers and response status code
 * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(Duration timeout, Context context) {
    // Delegates to the overload that accepts request conditions, with no conditions applied.
    return this.deleteWithResponse(null, timeout, context);
}
/**
 * Deletes the file associate with the client.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response that only contains headers and response status code
 * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(ShareRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Normalize optional arguments before capturing them in the lambda.
    ShareRequestConditions conditions = (requestConditions != null) ? requestConditions
        : new ShareRequestConditions();
    Context ctx = (context != null) ? context : Context.NONE;
    Callable<ResponseBase<FilesDeleteHeaders, Void>> call = () -> azureFileStorageClient.getFiles()
        .deleteWithResponse(shareName, filePath, null, conditions.getLeaseId(), ctx);
    return StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class);
}
/**
 * Deletes the file associate with the client if it exists.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @return {@code true} if the file is successfully deleted, {@code false} if the file does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    return deleteIfExistsWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Deletes the file associate with the client if it exists.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers. If {@link Response}'s status code is 202, the file
 * was successfully deleted. If status code is 404, the file does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(ShareRequestConditions requestConditions, Duration timeout,
    Context context) {
    try {
        Response<Void> response = this.deleteWithResponse(requestConditions, timeout, context);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), true);
    } catch (ShareStorageException e) {
        // Null-safe comparison: the exception's error code can be absent on some responses, and the
        // original 'e.getErrorCode().equals(...)' order would throw NPE and mask the real storage error.
        if (e.getStatusCode() == 404 && ShareErrorCode.RESOURCE_NOT_FOUND.equals(e.getErrorCode())) {
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    }
}
/**
 * Retrieves the properties of the storage account's file. The properties include file metadata, last modified
 * date, is server encrypted, and eTag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file-properties">Azure Docs</a>.</p>
 *
 * @return {@link ShareFileProperties Storage file properties}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileProperties getProperties() {
    return getPropertiesWithResponse(null, Context.NONE).getValue();
}
/**
 * Retrieves the properties of the storage account's file. The properties include file metadata, last modified
 * date, is server encrypted, and eTag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file-properties">Azure Docs</a>.</p>
 *
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
 * status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> getPropertiesWithResponse(Duration timeout, Context context) {
    // Delegates to the overload that accepts request conditions, with no conditions applied.
    return this.getPropertiesWithResponse(null, timeout, context);
}
/**
 * Retrieves the properties of the storage account's file. The properties include file metadata, last modified
 * date, is server encrypted, and eTag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-file-properties">Azure Docs</a>.</p>
 *
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
 * status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> getPropertiesWithResponse(ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Normalize optional arguments before capturing them in the lambda.
    ShareRequestConditions conditions = (requestConditions != null) ? requestConditions
        : new ShareRequestConditions();
    Context ctx = (context != null) ? context : Context.NONE;
    Callable<ResponseBase<FilesGetPropertiesHeaders, Void>> call = () -> azureFileStorageClient.getFiles()
        .getPropertiesWithResponse(shareName, filePath, snapshot, null, conditions.getLeaseId(), ctx);
    // Issue the request (honoring the optional timeout) and map raw headers onto ShareFileProperties.
    return ModelHelper.getPropertiesResponse(
        StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class));
}
/**
 * Sets the user-defined httpHeaders to associate to the file.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-file-properties">Azure Docs</a>.</p>
 *
 * @param newFileSize New file size of the file
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file
 * @return The {@link ShareFileInfo file info}
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission) {
    return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, Context.NONE)
        .getValue();
}
/**
 * Sets the user-defined httpHeaders to associate to the file.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-file-properties">Azure Docs</a>.</p>
 *
 * @param newFileSize New file size of the file
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Response containing the {@link ShareFileInfo file info} with headers and status code
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Duration timeout, Context context) {
    // Delegates to the overload that accepts request conditions, with no conditions applied.
    return this.setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null,
        timeout, context);
}
/**
* Sets the user-defined httpHeaders to associate to the file.
*
* <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the httpHeaders of contentType of "text/plain"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* fileClient.setPropertiesWithResponse&
* null&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>Clear the httpHeaders of the file and preserve the SMB properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param newFileSize New file size of the file
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileInfo file info} with headers and status code
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    Context ctx = (context == null) ? Context.NONE : context;
    ShareRequestConditions conditions =
        (requestConditions == null) ? new ShareRequestConditions() : requestConditions;
    FileSmbProperties properties = (smbProperties == null) ? new FileSmbProperties() : smbProperties;

    // A permission may be supplied inline or via a permission key, but not both.
    ModelHelper.validateFilePermissionAndKey(filePermission, properties.getFilePermissionKey());

    // Unset SMB values fall back to the service-side "preserve" sentinel.
    String permission = properties.setFilePermission(filePermission, FileConstants.PRESERVE);
    String permissionKey = properties.getFilePermissionKey();
    String attributes = properties.setNtfsFileAttributes(FileConstants.PRESERVE);
    String creationTime = properties.setFileCreationTime(FileConstants.PRESERVE);
    String lastWriteTime = properties.setFileLastWriteTime(FileConstants.PRESERVE);
    String changeTime = properties.getFileChangeTimeString();

    Callable<ResponseBase<FilesSetHttpHeadersHeaders, Void>> call = () ->
        this.azureFileStorageClient.getFiles().setHttpHeadersWithResponse(shareName, filePath, attributes, null,
            newFileSize, permission, permissionKey, creationTime, lastWriteTime, changeTime,
            conditions.getLeaseId(), httpHeaders, ctx);
    return ModelHelper.setPropertiesResponse(
        StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class));
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadata
* <pre>
* fileClient.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadata
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadata
* <pre>
* fileClient.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadata
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @return The {@link ShareFileMetadataInfo file meta info}
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileMetadataInfo setMetadata(Map<String, String> metadata) {
    // Delegate to the response-returning overload with no timeout and an empty context.
    Response<ShareFileMetadataInfo> response = setMetadataWithResponse(metadata, null, Context.NONE);
    return response.getValue();
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Collections.singletonMap&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata, Duration timeout,
    Context context) {
    // No lease conditions supplied: delegate with a null ShareRequestConditions.
    return this.setMetadataWithResponse(metadata, null, timeout, context);
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    Context ctx = (context == null) ? Context.NONE : context;
    ShareRequestConditions conditions =
        (requestConditions == null) ? new ShareRequestConditions() : requestConditions;
    // A null metadata map clears the metadata currently associated with the file.
    Callable<ResponseBase<FilesSetMetadataHeaders, Void>> call = () -> this.azureFileStorageClient.getFiles()
        .setMetadataWithResponse(shareName, filePath, null, metadata, conditions.getLeaseId(), ctx);
    return ModelHelper.setMetadataResponse(
        StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class));
}
/**
* Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.upload
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = fileClient.upload&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.upload
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*
* @deprecated Use {@link ShareFileClient
* {@link ShareFileClient
* large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo upload(InputStream data, long length) {
    // Writes at offset 0 with no timeout and an empty context.
    Response<ShareFileUploadInfo> response = uploadWithResponse(data, length, 0L, null, Context.NONE);
    return response.getValue();
}
/**
* Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" starting from 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*
* @deprecated Use {@link ShareFileClient
* instead. Or consider {@link ShareFileClient
* an upload that can handle large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset,
    Duration timeout, Context context) {
    // No lease conditions supplied: delegate with a null ShareRequestConditions.
    return this.uploadWithResponse(data, length, offset, null, timeout, context);
}
/**
* Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" starting from 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse&
* requestConditions, Duration.ofSeconds&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*
* @deprecated Use {@link ShareFileClient
* instead. Or consider {@link ShareFileClient
* an upload that can handle large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Package the arguments into the options bag used by the non-deprecated range upload.
    ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(data, length)
        .setOffset(offset)
        .setRequestConditions(requestConditions);
    return this.uploadRangeWithResponse(options, timeout, context);
}
/**
* Buffers a range of bytes and uploads sub-ranges in parallel to a file in storage file service. Upload operations
* perform an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.upload
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = shareFileClient.upload&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.upload
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param transferOptions {@link ParallelTransferOptions} for file transfer.
* @return The {@link ShareFileUploadInfo file upload info}
*/
// Added the @ServiceMethod annotation that every other public service method in this
// client carries; its absence here was inconsistent and hides the method from
// annotation-driven SDK tooling.
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo upload(InputStream data, long length, ParallelTransferOptions transferOptions) {
    return uploadWithResponse(new ShareFileUploadOptions(data, length).setParallelTransferOptions(transferOptions),
        null, Context.NONE).getValue();
}
/**
* Buffers a range of bytes and uploads sub-ranges in parallel to a file in storage file service. Upload operations
* perform an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = shareFileAsyncClient.uploadWithResponse&
* new ShareFileUploadOptions&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options Argument collection for the upload operation.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link ShareFileUploadInfo file upload info}
*/
// Added the @ServiceMethod annotation for consistency with the other public service
// methods in this client.
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(ShareFileUploadOptions options,
    Duration timeout, Context context) {
    // Delegates to the async client and blocks, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        shareFileAsyncClient.uploadWithResponse(options, context), timeout);
}
/**
* Uploads a range of bytes to the specified offset of a file in storage file service. Upload operations perform an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRange
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = shareFileClient.uploadRange&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRange
*
* <p>This method does a single Put Range operation. For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*/
// Added the @ServiceMethod annotation for consistency with the other public service
// methods in this client.
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo uploadRange(InputStream data, long length) {
    // Single Put Range call starting at offset 0 with default options.
    return this.uploadRangeWithResponse(new ShareFileUploadRangeOptions(data, length), null, Context.NONE)
        .getValue();
}
/**
* Uploads a range of bytes to the specified offset of a file in storage file service. Upload operations perform an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = shareFileClient.uploadRangeWithResponse&
* new ShareFileUploadRangeOptions&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeWithResponse
*
* <p>This method does a single Put Range operation. For more information, see the
* <a href="https:
*
* @param options Argument collection for the upload operation.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*/
// Added the @ServiceMethod annotation for consistency with the other public service
// methods in this client.
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadRangeWithResponse(ShareFileUploadRangeOptions options,
    Duration timeout, Context context) {
    // Delegates to the async client and blocks, honoring the optional timeout.
    return StorageImplUtils.blockWithOptionalTimeout(
        shareFileAsyncClient.uploadRangeWithResponse(options, context), timeout);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl
* <pre>
* ShareFileUploadRangeFromUrlInfo response = fileClient.uploadRangeFromUrl&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @return The {@link ShareFileUploadRangeFromUrlInfo file upload range from url info}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long destinationOffset, long sourceOffset,
    String sourceUrl) {
    // Delegate to the response-returning overload with no conditions, timeout, or context.
    Response<ShareFileUploadRangeFromUrlInfo> response =
        uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, Context.NONE);
    return response.getValue();
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* Response<ShareFileUploadRangeFromUrlInfo> response =
* fileClient.uploadRangeFromUrlWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset,
    long sourceOffset, String sourceUrl, Duration timeout, Context context) {
    // No destination request conditions supplied: delegate with null.
    return this.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, timeout,
        context);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadRangeFromUrlInfo> response = fileClient.uploadRangeFromUrlWithResponse&
* "sourceUrl", requestConditions, Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset,
    long sourceOffset, String sourceUrl, ShareRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Package the arguments into the options bag consumed by the options-based overload.
    ShareFileUploadRangeFromUrlOptions options = new ShareFileUploadRangeFromUrlOptions(length, sourceUrl)
        .setDestinationOffset(destinationOffset)
        .setSourceOffset(sourceOffset)
        .setDestinationRequestConditions(requestConditions);
    return this.uploadRangeFromUrlWithResponse(options, timeout, context);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* Response<ShareFileUploadRangeFromUrlInfo> response =
* fileClient.uploadRangeFromUrlWithResponse&
* .setDestinationOffset&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options argument collection
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(
    ShareFileUploadRangeFromUrlOptions options, Duration timeout, Context context) {
    ShareRequestConditions conditions = (options.getDestinationRequestConditions() == null)
        ? new ShareRequestConditions() : options.getDestinationRequestConditions();
    // Ranges are inclusive on both ends, hence the -1 on the end offsets.
    ShareFileRange destRange = new ShareFileRange(options.getDestinationOffset(),
        options.getDestinationOffset() + options.getLength() - 1);
    ShareFileRange srcRange = new ShareFileRange(options.getSourceOffset(),
        options.getSourceOffset() + options.getLength() - 1);
    Context ctx = (context == null) ? Context.NONE : context;
    String sourceAuth = (options.getSourceAuthorization() == null)
        ? null : options.getSourceAuthorization().toString();
    String copySource = Utility.encodeUrlPath(options.getSourceUrl());
    Callable<ResponseBase<FilesUploadRangeFromURLHeaders, Void>> call = () -> this.azureFileStorageClient
        .getFiles()
        .uploadRangeFromURLWithResponse(shareName, filePath, destRange.toString(), copySource, 0,
            null, srcRange.toString(), null, conditions.getLeaseId(), sourceAuth,
            options.getLastWrittenMode(), null, ctx);
    return ModelHelper.mapUploadRangeFromUrlResponse(
        StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class));
}
/**
* Clears a range of bytes to specific of a file in storage file service. Clear operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clears the first 1024 bytes. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRange
* <pre>
* ShareFileUploadInfo response = fileClient.clearRange&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRange
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being cleared.
* @return The {@link ShareFileUploadInfo file upload info}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo clearRange(long length) {
    // Clears from the start of the file with no timeout and an empty context.
    Response<ShareFileUploadInfo> response = clearRangeWithResponse(length, 0, null, Context.NONE);
    return response.getValue();
}
/**
* Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clear the range starting from 1024 with length of 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
* <pre>
* Response<ShareFileUploadInfo> response = fileClient.clearRangeWithResponse&
* Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset, Duration timeout,
    Context context) {
    // No lease conditions supplied: delegate with a null ShareRequestConditions.
    return this.clearRangeWithResponse(length, offset, null, timeout, context);
}
/**
* Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clear the range starting from 1024 with length of 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadInfo> response = fileClient.clearRangeWithResponse&
* Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    ShareRequestConditions conditions =
        (requestConditions == null) ? new ShareRequestConditions() : requestConditions;
    // The range is inclusive on both ends, hence the -1 on the end offset.
    ShareFileRange range = new ShareFileRange(offset, offset + length - 1);
    Context ctx = (context == null) ? Context.NONE : context;
    Callable<ResponseBase<FilesUploadRangeHeaders, Void>> call = () -> this.azureFileStorageClient.getFiles()
        .uploadRangeWithResponse(shareName, filePath, range.toString(), ShareFileRangeWriteType.CLEAR, 0L,
            null, null, conditions.getLeaseId(), null, null, ctx);
    return ModelHelper.transformUploadResponse(
        StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class));
}
/**
* Uploads file to storage file service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload the file from the source file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadFromFile
* <pre>
* fileClient.uploadFromFile&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadFromFile
*
* <p>For more information, see the
* <a href="https:
* and
* <a href="https:
*
* @param uploadFilePath The path where store the source file to upload
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String uploadFilePath) {
    // No lease conditions supplied: delegate with a null ShareRequestConditions.
    this.uploadFromFile(uploadFilePath, null);
}
/**
* Uploads file to storage file service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload the file from the source file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadFromFile
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* fileClient.uploadFromFile&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadFromFile
*
* <p>For more information, see the
* <a href="https:
* and
* <a href="https:
*
* @param uploadFilePath The path where store the source file to upload
* @param requestConditions {@link ShareRequestConditions}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges for the file client.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges -->
* <pre>
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link ShareFileRange ranges} in the files.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges() {
    // No range filter, no timeout, default context: list every valid range in the file.
    ShareFileRange entireFile = null;
    return listRanges(entireFile, null, null);
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges
* <pre>
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* new Context&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges
*
* <p>For more information, see the
* <a href="https:
*
* @param range Optional byte range which returns file data only from the specified range.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, Duration timeout, Context context) {
    // Forward to the most general overload, applying no lease-based request conditions.
    return this.listRanges(range, /* requestConditions */ null, timeout, context);
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* Duration.ofSeconds&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges
*
* <p>For more information, see the
* <a href="https:
*
* @param range Optional byte range which returns file data only from the specified range.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    ShareRequestConditions finalRequestConditions = requestConditions == null
        ? new ShareRequestConditions() : requestConditions;
    String rangeString = range == null ? null : range.toString();
    // Route through the shared sendRequest helper, as the other synchronous methods of this client
    // do, so timeout handling and ShareStorageException mapping stay consistent. The previous
    // hand-rolled THREAD_POOL submission wrapped service errors raised under a timeout in a
    // generic RuntimeException instead of surfacing ShareStorageException.
    Callable<ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList>> operation = () ->
        this.azureFileStorageClient.getFiles().getRangeListWithResponse(shareName, filePath, snapshot,
            null, null, rangeString, finalRequestConditions.getLeaseId(), null, finalContext);
    ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList> response =
        StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
    // Convert the wire-level ranges into public ShareFileRange values.
    List<ShareFileRange> shareFileRangeList = response.getValue().getRanges().stream()
        .map(r -> new Range().setStart(r.getStart()).setEnd(r.getEnd()))
        .map(ShareFileRange::new)
        .collect(Collectors.toList());
    // The data is fully materialized already; the PagedIterable replays it as a single page.
    Supplier<PagedResponse<ShareFileRange>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
        response.getStatusCode(), response.getHeaders(), shareFileRangeList, null,
        response.getDeserializedHeaders());
    return new PagedIterable<>(finalResponse);
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRangesDiff
* <pre>
* ShareFileRangeList rangeList = fileClient.listRangesDiff&
* System.out.println&
* for &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRangesDiff
*
* <p>For more information, see the
* <a href="https:
*
* @param previousSnapshot Specifies that the response will contain only ranges that were changed between target
* file and previous snapshot. Changed ranges include both updated and cleared ranges. The target file may be a
* snapshot, as long as the snapshot specified by previousSnapshot is the older of the two.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileRangeList listRangesDiff(String previousSnapshot) {
    // Build default options and unwrap the response body for this convenience overload.
    ShareFileListRangesDiffOptions options = new ShareFileListRangesDiffOptions(previousSnapshot);
    return listRangesDiffWithResponse(options, null, Context.NONE).getValue();
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRangesDiffWithResponse
* <pre>
* ShareFileRangeList rangeList = fileClient.listRangesDiffWithResponse&
* new ShareFileListRangesDiffOptions&
* .setRange&
* System.out.println&
* for &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRangesDiffWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ShareFileListRangesDiffOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileRangeList> listRangesDiffWithResponse(ShareFileListRangesDiffOptions options,
    Duration timeout, Context context) {
    // Fail fast with a descriptive message instead of an anonymous NullPointerException on the
    // dereferences below; matches the validation style of renameWithResponse.
    StorageImplUtils.assertNotNull("options", options);
    Context finalContext = context == null ? Context.NONE : context;
    // Absent request conditions default to empty conditions (no lease requirement).
    ShareRequestConditions requestConditions = options.getRequestConditions() == null
        ? new ShareRequestConditions() : options.getRequestConditions();
    String rangeString = options.getRange() == null ? null : options.getRange().toString();
    Callable<ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList>> operation = () ->
        this.azureFileStorageClient.getFiles().getRangeListWithResponse(shareName, filePath, snapshot,
            options.getPreviousSnapshot(), null, rangeString, requestConditions.getLeaseId(),
            options.isRenameIncluded(), finalContext);
    return StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
}
/**
* List of open handles on a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all handles for the file client.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listHandles -->
* <pre>
* fileClient.listHandles&
* .forEach&
* handleItem.getHandleId&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listHandles -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link HandleItem handles} in the files that satisfy the requirements
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<HandleItem> listHandles() {
    // No page-size hint, no timeout; use the root context.
    return this.listHandles(null, null, Context.NONE);
}
/**
* List of open handles on a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List 10 handles for the file client.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listHandles
* <pre>
* fileClient.listHandles&
* .forEach&
* handleItem.getHandleId&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listHandles
*
* <p>For more information, see the
* <a href="https:
*
* @param maxResultsPerPage Optional max number of results returned per page
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link HandleItem handles} in the file that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<HandleItem> listHandles(Integer maxResultsPerPage, Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    // Route through the shared sendRequest helper, as the other synchronous methods of this client
    // do, so timeout handling and ShareStorageException mapping stay consistent. The previous
    // hand-rolled THREAD_POOL submission wrapped service errors raised under a timeout in a
    // generic RuntimeException instead of surfacing ShareStorageException.
    Callable<ResponseBase<FilesListHandlesHeaders, ListHandlesResponse>> operation = () ->
        this.azureFileStorageClient.getFiles().listHandlesWithResponse(shareName, filePath, null,
            maxResultsPerPage, null, snapshot, finalContext);
    ResponseBase<FilesListHandlesHeaders, ListHandlesResponse> response =
        StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
    // The response is fully materialized already; replay it as a single page.
    Supplier<PagedResponse<HandleItem>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
        response.getStatusCode(),
        response.getHeaders(),
        ModelHelper.transformHandleItems(response.getValue().getHandleList()),
        null,
        response.getDeserializedHeaders());
    return new PagedIterable<>(finalResponse);
}
/**
* Closes a handle on the file at the service. This is intended to be used alongside {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Force close handles returned by list handles.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.forceCloseHandle
* <pre>
* fileClient.listHandles&
* fileClient.forceCloseHandle&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.forceCloseHandle
*
* <p>For more information, see the
* <a href="https:
*
* @param handleId Handle ID to be closed.
* @return Information about the closed handles.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CloseHandlesInfo forceCloseHandle(String handleId) {
    // Unwrap the value from the WithResponse overload, using defaults for timeout and context.
    Response<CloseHandlesInfo> response = forceCloseHandleWithResponse(handleId, null, Context.NONE);
    return response.getValue();
}
/**
* Closes a handle on the file at the service. This is intended to be used alongside {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Force close handles returned by list handles.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse
* <pre>
* fileClient.listHandles&
* Response<CloseHandlesInfo> closeResponse = fileClient
* .forceCloseHandleWithResponse&
* System.out.printf&
* handleItem.getHandleId&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param handleId Handle ID to be closed.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that contains information about the closed handles, headers and response status code.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<CloseHandlesInfo> forceCloseHandleWithResponse(String handleId, Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<FilesForceCloseHandlesHeaders, Void>> operation = () ->
        this.azureFileStorageClient.getFiles().forceCloseHandlesWithResponse(shareName, filePath, handleId,
            null, null, snapshot, finalContext);
    ResponseBase<FilesForceCloseHandlesHeaders, Void> response =
        StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
    // Surface the closed/failed handle counters carried in the response headers.
    FilesForceCloseHandlesHeaders deserializedHeaders = response.getDeserializedHeaders();
    CloseHandlesInfo info = new CloseHandlesInfo(deserializedHeaders.getXMsNumberOfHandlesClosed(),
        deserializedHeaders.getXMsNumberOfHandlesFailed());
    return new SimpleResponse<>(response, info);
}
/**
* Closes all handles opened on the file at the service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Force close all handles.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles
* <pre>
* CloseHandlesInfo closeHandlesInfo = fileClient.forceCloseAllHandles&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the closed handles
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    // Route through the shared sendRequest helper, as the other synchronous methods of this client
    // do, so timeout handling and ShareStorageException mapping stay consistent. The previous
    // hand-rolled THREAD_POOL submission wrapped service errors raised under a timeout in a
    // generic RuntimeException instead of surfacing ShareStorageException.
    Callable<ResponseBase<FilesForceCloseHandlesHeaders, Void>> operation = () ->
        this.azureFileStorageClient.getFiles().forceCloseHandlesWithResponse(shareName, filePath, "*", null,
            null, snapshot, finalContext);
    ResponseBase<FilesForceCloseHandlesHeaders, Void> response =
        StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
    Supplier<PagedResponse<CloseHandlesInfo>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
        response.getStatusCode(), response.getHeaders(),
        Collections.singletonList(new CloseHandlesInfo(
            response.getDeserializedHeaders().getXMsNumberOfHandlesClosed(),
            response.getDeserializedHeaders().getXMsNumberOfHandlesFailed())),
        response.getDeserializedHeaders().getXMsMarker(),
        response.getDeserializedHeaders());
    // Aggregate the per-page closed/failed counters into a single result.
    return new PagedIterable<>(finalResponse).stream().reduce(new CloseHandlesInfo(0, 0),
        (accu, next) -> new CloseHandlesInfo(accu.getClosedHandles() + next.getClosedHandles(),
            accu.getFailedHandles() + next.getFailedHandles()));
}
/**
* Moves the file to another location within the share.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.rename
* <pre>
* ShareFileClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.rename
*
* @param destinationPath Relative path from the share to rename the file to.
* @return A {@link ShareFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileClient rename(String destinationPath) {
    // Rename with default options, no timeout, root context; return the new file's client.
    ShareFileRenameOptions options = new ShareFileRenameOptions(destinationPath);
    return renameWithResponse(options, null, Context.NONE).getValue();
}
/**
* Moves the file to another location within the share.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.renameWithResponse
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* ShareFileRenameOptions options = new ShareFileRenameOptions&
* .setDestinationRequestConditions&
* .setSourceRequestConditions&
* .setIgnoreReadOnly&
* .setReplaceIfExists&
* .setFilePermission&
* .setSmbProperties&
*
* ShareFileClient newRenamedClient = client.renameWithResponse&
* .getValue&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.renameWithResponse
*
* @param options {@link ShareFileRenameOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} whose {@link Response
* interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileClient> renameWithResponse(ShareFileRenameOptions options, Duration timeout,
Context context) {
StorageImplUtils.assertNotNull("options", options);
Context finalContext = context == null ? Context.NONE : context;
// Absent request conditions default to empty conditions (no lease requirement) on either side.
ShareRequestConditions sourceRequestConditions = options.getSourceRequestConditions() == null
? new ShareRequestConditions() : options.getSourceRequestConditions();
ShareRequestConditions destinationRequestConditions = options.getDestinationRequestConditions() == null
? new ShareRequestConditions() : options.getDestinationRequestConditions();
// Translate the public request conditions into the generated layer's lease access conditions.
SourceLeaseAccessConditions sourceConditions = new SourceLeaseAccessConditions()
.setSourceLeaseId(sourceRequestConditions.getLeaseId());
DestinationLeaseAccessConditions destinationConditions = new DestinationLeaseAccessConditions()
.setDestinationLeaseId(destinationRequestConditions.getLeaseId());
CopyFileSmbInfo smbInfo = null;
String filePermissionKey = null;
if (options.getSmbProperties() != null) {
// Convert the optional SMB properties into the string wire format the rename call expects.
FileSmbProperties tempSmbProperties = options.getSmbProperties();
filePermissionKey = tempSmbProperties.getFilePermissionKey();
String fileAttributes = NtfsFileAttributes.toString(tempSmbProperties.getNtfsFileAttributes());
String fileCreationTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileCreationTime());
String fileLastWriteTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileLastWriteTime());
String fileChangeTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileChangeTime());
smbInfo = new CopyFileSmbInfo()
.setFileAttributes(fileAttributes)
.setFileCreationTime(fileCreationTime)
.setFileLastWriteTime(fileLastWriteTime)
.setFileChangeTime(fileChangeTime)
.setIgnoreReadOnly(options.isIgnoreReadOnly());
}
// Effectively-final copies so the lambda below can capture them.
CopyFileSmbInfo finalSmbInfo = smbInfo;
String finalFilePermissionKey = filePermissionKey;
ShareFileClient destinationFileClient = getFileClient(options.getDestinationPath());
ShareFileHttpHeaders headers = options.getContentType() == null ? null
: new ShareFileHttpHeaders().setContentType(options.getContentType());
// The rename source is this file's URL; append the SAS signature when one is attached so the
// service can authorize access to the source.
String renameSource = Utility.encodeUrlPath(this.getFileUrl());
String finalRenameSource = this.sasToken != null ? renameSource + "?" + this.sasToken.getSignature() : renameSource;
// The rename request is issued against the destination path's client.
Callable<ResponseBase<FilesRenameHeaders, Void>> operation = () ->
destinationFileClient.azureFileStorageClient.getFiles().renameWithResponse(
destinationFileClient.getShareName(), destinationFileClient.getFilePath(), finalRenameSource,
null /* timeout */, options.getReplaceIfExists(), options.isIgnoreReadOnly(),
options.getFilePermission(), finalFilePermissionKey, options.getMetadata(), sourceConditions,
destinationConditions, finalSmbInfo, headers, finalContext);
ResponseBase<FilesRenameHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
return new SimpleResponse<>(response, destinationFileClient);
}
/**
 * Creates a {@link ShareFileClient} for {@code destinationPath} within the same share, reusing this client's
 * pipeline, account name, service version and SAS token. The returned client has no snapshot attached.
 *
 * @param destinationPath Relative path within the share for the new client.
 * @return A client targeting {@code destinationPath}.
 * @throws IllegalArgumentException If {@code destinationPath} is {@code null} or empty.
 */
ShareFileClient getFileClient(String destinationPath) {
if (CoreUtils.isNullOrEmpty(destinationPath)) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'destinationPath' can not be set to null"));
}
return new ShareFileClient(shareFileAsyncClient, this.azureFileStorageClient, getShareName(), destinationPath, null,
this.getAccountName(), this.getServiceVersion(), this.getSasToken());
}
/**
* Get snapshot id which attached to {@link ShareFileClient}. Return {@code null} if no snapshot id attached.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Get the share snapshot id. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getShareSnapshotId -->
* <pre>
* OffsetDateTime currentTime = OffsetDateTime.of&
* ShareFileClient fileClient = new ShareFileClientBuilder&
* .endpoint&
* .sasToken&
* .shareName&
* .resourcePath&
* .snapshot&
* .buildFileClient&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getShareSnapshotId -->
*
* @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base
* share.
*/
public String getShareSnapshotId() {
    // Null when this client targets the live share rather than a snapshot.
    return snapshot;
}
/**
* Get the share name of file client.
*
* <p>Get the share name. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getShareName -->
* <pre>
* String shareName = fileClient.getShareName&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getShareName -->
*
* @return The share name of the file.
*/
public String getShareName() {
    return this.shareName;
}
/**
* Get file path of the client.
*
* <p>Get the file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getFilePath -->
* <pre>
* String filePath = fileClient.getFilePath&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getFilePath -->
*
* @return The path of the file.
*/
public String getFilePath() {
    return this.filePath;
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    return accountName;
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // The pipeline lives on the generated storage client.
    return this.azureFileStorageClient.getHttpPipeline();
}
/**
 * Gets the SAS credential attached to this client, or {@code null} if none was supplied.
 *
 * @return The {@link AzureSasCredential} used by this client, possibly {@code null}.
 */
AzureSasCredential getSasToken() {
return sasToken;
}
/**
* Generates a service SAS for the file using the specified {@link ShareServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link ShareServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* ShareFileSasPermission permission = new ShareFileSasPermission&
*
* ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues&
* .setStartTime&
*
* shareFileClient.generateSas&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.generateSas
*
* @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatureValues) {
    // No caller-supplied context for this convenience overload.
    return this.generateSas(shareServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the file using the specified {@link ShareServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link ShareServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* ShareFileSasPermission permission = new ShareFileSasPermission&
*
* ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues&
* .setStartTime&
*
* &
* shareFileClient.generateSas&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.generateSas
*
* @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatureValues, Context context) {
    // Signing requires the shared key credential configured on this client's pipeline.
    ShareSasImplUtil sasUtil = new ShareSasImplUtil(shareServiceSasSignatureValues, getShareName(), getFilePath());
    return sasUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} |
nit: Use `buffer.remaining()` instead of `buffer.capacity()` (a sliced or partially-read buffer's capacity can exceed its readable bytes), and I recommend using `ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer))` rather than allocate-and-put. | public MappedByteBufferInputStream(ByteBuffer buffer) {
this.byteBuffer = ByteBuffer.allocate(buffer.capacity()).put(buffer);
this.byteBuffer.flip();
} | this.byteBuffer = ByteBuffer.allocate(buffer.capacity()).put(buffer); | public MappedByteBufferInputStream(ByteBuffer buffer) {
this.byteBuffer = ByteBuffer.wrap(FluxUtil.byteBufferToArray(buffer));
this.byteBuffer.flip();
} | class MappedByteBufferInputStream extends InputStream {
private final ByteBuffer byteBuffer;
/**
* Creates a new input stream from the given {@link ByteBuffer}.
*
* @param buffer The buffer to wrap.
*/
@Override
public int read() throws IOException {
    // Single-byte read: -1 at end of buffer, otherwise the next byte zero-extended to an int.
    return byteBuffer.hasRemaining() ? (byteBuffer.get() & 0xFF) : -1;
}
@Override
public int read(byte[] bytes, int off, int len) throws IOException {
    // Per the InputStream contract, a zero-length read returns 0 even at end of stream;
    // the previous code returned -1 in that case.
    if (len == 0) {
        return 0;
    }
    if (!byteBuffer.hasRemaining()) {
        return -1;
    }
    // Copy at most the bytes remaining; avoid mutating the 'len' parameter.
    int toRead = Math.min(len, byteBuffer.remaining());
    byteBuffer.get(bytes, off, toRead);
    return toRead;
}
} | class MappedByteBufferInputStream extends InputStream {
private final ByteBuffer byteBuffer;
/**
* Creates a new input stream from the given {@link ByteBuffer}.
*
* @param buffer The buffer to wrap.
*/
@Override
public int read() throws IOException {
    // Return -1 once the buffer is exhausted; otherwise the next byte as an unsigned value.
    if (byteBuffer.hasRemaining()) {
        return byteBuffer.get() & 0xFF;
    }
    return -1;
}
@Override
public int read(byte[] bytes, int off, int len) throws IOException {
    // Per the InputStream contract, a zero-length read returns 0 even at end of stream;
    // the previous code returned -1 in that case.
    if (len == 0) {
        return 0;
    }
    if (!byteBuffer.hasRemaining()) {
        return -1;
    }
    // Copy at most the bytes remaining; avoid mutating the 'len' parameter.
    int toRead = Math.min(len, byteBuffer.remaining());
    byteBuffer.get(bytes, off, toRead);
    return toRead;
}
} |
Will revisit adding support for uploadToFile later | public void uploadFromFile(String uploadFilePath, ShareRequestConditions requestConditions) {
// Slice the local file into upload ranges (see ModelHelper.sliceFile for the sizing policy).
List<ShareFileRange> shareFileRanges = ModelHelper.sliceFile(uploadFilePath);
try (FileChannel channel = FileChannel.open(Paths.get(uploadFilePath), StandardOpenOption.READ)) {
shareFileRanges.stream().forEach(range -> {
try {
// Memory-map only this range of the file (range bounds are inclusive, hence the +1).
MappedByteBuffer map = channel.map(READ_ONLY, range.getStart(),
range.getEnd() - range.getStart() + 1);
InputStream inputStream = new MappedByteBufferInputStream(map);
ShareFileUploadRangeOptions shareFileUploadRangeOptions =
new ShareFileUploadRangeOptions(inputStream, range.getEnd() - range.getStart() + 1)
.setRequestConditions(requestConditions)
.setOffset(range.getStart());
// Ranges are uploaded sequentially with no per-call timeout.
uploadRangeWithResponse(shareFileUploadRangeOptions, null, Context.NONE);
} catch (IOException e) {
// Rethrow as unchecked so the failure propagates out of the stream pipeline.
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
});
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
}
}
shareFileAsyncClient.uploadFromFile(uploadFilePath, requestConditions).block();
} | class ShareFileClient {
private final ShareFileAsyncClient shareFileAsyncClient;
private static final ClientLogger LOGGER = new ClientLogger(ShareFileClient.class);
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String filePath;
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
private final AzureSasCredential sasToken;
private final String fileUrlString;
/**
* Creates a ShareFileClient.
* @param azureFileStorageClient Client that interacts with the service interfaces
* @param shareName Name of the share
* @param filePath Name of the file
* @param snapshot The snapshot of the share
* @param accountName Name of the account
* @param serviceVersion The version of the service to be used when making requests.
* @param sasToken The SAS token used to authenticate the request
*/
ShareFileClient(ShareFileAsyncClient shareFileAsyncClient, AzureFileStorageImpl azureFileStorageClient,
String shareName, String filePath, String snapshot, String accountName, ShareServiceVersion serviceVersion,
AzureSasCredential sasToken) {
Objects.requireNonNull(shareName, "'shareName' cannot be null.");
Objects.requireNonNull(filePath, "'filePath' cannot be null.");
this.shareFileAsyncClient = shareFileAsyncClient;
this.azureFileStorageClient = azureFileStorageClient;
this.shareName = shareName;
this.filePath = filePath;
this.snapshot = snapshot;
this.accountName = accountName;
this.serviceVersion = serviceVersion;
this.sasToken = sasToken;
// Precompute the file URL; a snapshot, when present, is carried as a query parameter.
StringBuilder urlBuilder = new StringBuilder(azureFileStorageClient.getUrl())
.append("/").append(shareName).append("/").append(filePath);
if (snapshot != null) {
urlBuilder.append("?sharesnapshot=").append(snapshot);
}
this.fileUrlString = urlBuilder.toString();
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
    // The account URL is held by the generated storage client.
    return this.azureFileStorageClient.getUrl();
}
/**
* Get the url of the storage file client.
*
* @return the URL of the storage file client.
*/
public String getFileUrl() {
    return fileUrlString;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public ShareServiceVersion getServiceVersion() {
    return serviceVersion;
}
/**
* Opens a file input stream to download the file.
*
* @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileInputStream openInputStream() {
    // Read the whole file: start at offset 0 with no explicit end.
    ShareFileRange wholeFile = new ShareFileRange(0);
    return openInputStream(wholeFile);
}
/**
* Opens a file input stream to download the specified range of the file.
*
* @param range {@link ShareFileRange}
* @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileInputStream openInputStream(ShareFileRange range) {
    // An open-ended range maps to a null count; otherwise count = end - start + 1 (inclusive bounds).
    Long count = range.getEnd() == null ? null : range.getEnd() - range.getStart() + 1;
    return new StorageFileInputStream(shareFileAsyncClient, range.getStart(), count);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it will
* be overwritten.
*
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileOutputStream getFileOutputStream() {
    // Begin writing at the start of the file.
    return this.getFileOutputStream(0);
}
/**
* Creates and opens an output stream to write data to the file. If the file already exists on the service, it will
* be overwritten.
*
* @param offset Starting point of the upload range.
* @return A {@link StorageFileOutputStream} object used to write data to the file.
* @throws ShareStorageException If a storage service error occurred.
*/
public final StorageFileOutputStream getFileOutputStream(long offset) {
    // The output stream writes through the async client, starting at the requested offset.
    StorageFileOutputStream outputStream = new StorageFileOutputStream(shareFileAsyncClient, offset);
    return outputStream;
}
/**
* Creates and opens a {@link SeekableByteChannel} to write data to the file.
* @param options Options for opening the channel.
* @return The opened channel.
*/
public SeekableByteChannel getFileSeekableByteChannelWrite(ShareFileSeekableByteChannelWriteOptions options) {
    Objects.requireNonNull(options, "'options' cannot be null.");

    // Overwrite mode recreates the file at the requested size before any writes happen,
    // which requires the caller to have supplied that size.
    if (options.isOverwriteMode()) {
        Objects.requireNonNull(options.getFileSizeInBytes(), "'options.getFileSize()' cannot return null.");
        create(options.getFileSizeInBytes());
    }

    // Fall back to the maximum put-range size when the caller did not pick a chunk size.
    Long requestedChunkSize = options.getChunkSizeInBytes();
    int chunkSizeInBytes;
    if (requestedChunkSize != null) {
        chunkSizeInBytes = requestedChunkSize.intValue();
    } else {
        chunkSizeInBytes = (int) ModelHelper.FILE_MAX_PUT_RANGE_SIZE;
    }

    StorageSeekableByteChannelShareFileWriteBehavior writeBehavior =
        new StorageSeekableByteChannelShareFileWriteBehavior(this, options.getRequestConditions(),
            options.getFileLastWrittenMode());
    return new StorageSeekableByteChannel(chunkSizeInBytes, writeBehavior, 0L);
}
/**
* Creates and opens a {@link SeekableByteChannel} to read data from the file.
* @param options Options for opening the channel.
* @return The opened channel.
*/
public SeekableByteChannel getFileSeekableByteChannelRead(ShareFileSeekableByteChannelReadOptions options) {
    // Options are optional here: absent options mean no request conditions and the default chunk size.
    ShareRequestConditions requestConditions = null;
    Long requestedChunkSize = null;
    if (options != null) {
        requestConditions = options.getRequestConditions();
        requestedChunkSize = options.getChunkSizeInBytes();
    }
    int chunkSizeInBytes = requestedChunkSize == null
        ? (int) ModelHelper.FILE_MAX_PUT_RANGE_SIZE
        : requestedChunkSize.intValue();
    StorageSeekableByteChannelShareFileReadBehavior readBehavior =
        new StorageSeekableByteChannelShareFileReadBehavior(this, requestConditions);
    return new StorageSeekableByteChannel(chunkSizeInBytes, readBehavior, 0L);
}
/**
* Determines if the file this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.exists -->
* <pre>
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.exists -->
*
* @return Flag indicating existence of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    // Delegate to the with-response overload using no timeout and an empty context.
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
* Determines if the file this client represents exists in the cloud.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.existsWithResponse
* <pre>
* Context context = new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.existsWithResponse
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Flag indicating existence of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    try {
        // If fetching the properties succeeds, the file exists.
        Response<ShareFileProperties> propertiesResponse = getPropertiesWithResponse(timeout, context);
        return new SimpleResponse<>(propertiesResponse, true);
    } catch (RuntimeException e) {
        // Any error other than a "does not exist" status is a genuine failure — rethrow it.
        if (!ModelHelper.checkDoesNotExistStatusCode(e)) {
            throw LOGGER.logExceptionAsError(e);
        }
        // A "does not exist" error still carries a service response; surface it with a false value.
        HttpResponse errorResponse = ((HttpResponseException) e).getResponse();
        return new SimpleResponse<>(errorResponse.getRequest(), errorResponse.getStatusCode(),
            errorResponse.getHeaders(), false);
    }
}
/**
* Creates a file in the storage account and returns a response of {@link ShareFileInfo} to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.create -->
* <pre>
* ShareFileInfo response = fileClient.create&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.create -->
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @return The {@link ShareFileInfo file info}
* @throws ShareStorageException If the file has already existed, the parent directory does not exist or fileName
* is an invalid resource name.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileInfo create(long maxSize) {
    // Delegate to the full overload with no headers, SMB properties, permission, or metadata.
    Response<ShareFileInfo> response =
        createWithResponse(maxSize, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
* Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.createWithResponse
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* Response<ShareFileInfo> response = fileClient.createWithResponse&
* filePermission, Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.createWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param metadata Optional name-value pairs associated with the file as metadata.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
* @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
* directory is an invalid resource name.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
* @see <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Duration timeout,
    Context context) {
    // Delegate to the overload that also accepts request conditions, passing none.
    return this.createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata, null, timeout,
        context);
}
/**
* Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.createWithResponse
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
*
* ShareRequestConditions requestConditions = new ShareRequestConditions&
*
* Response<ShareFileInfo> response = fileClient.createWithResponse&
* filePermission, Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.createWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param maxSize The maximum size in bytes for the file.
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param metadata Optional name-value pairs associated with the file as metadata.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
* @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
* directory is an invalid resource name.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
* @see <a href="https:
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Normalize the optional arguments to non-null defaults.
    Context finalContext = context == null ? Context.NONE : context;
    ShareRequestConditions finalRequestConditions = requestConditions == null
        ? new ShareRequestConditions() : requestConditions;
    smbProperties = smbProperties == null ? new FileSmbProperties() : smbProperties;
    // Validate the filePermission/filePermissionKey combination (rules live in ModelHelper).
    ModelHelper.validateFilePermissionAndKey(filePermission, smbProperties.getFilePermissionKey());
    // Resolve the SMB string values the service expects, substituting the FileConstants
    // defaults (inherit / none / now) where the caller left them unset. NOTE: these setters
    // mutate smbProperties, so the statement order below matters.
    String finalFilePermission = smbProperties.setFilePermission(filePermission, FileConstants.FILE_PERMISSION_INHERIT);
    String filePermissionKey = smbProperties.getFilePermissionKey();
    String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.FILE_ATTRIBUTES_NONE);
    String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.FILE_TIME_NOW);
    String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.FILE_TIME_NOW);
    String fileChangeTime = smbProperties.getFileChangeTimeString();
    // Issue the Create File call through the generated client, honoring the optional timeout.
    Callable<ResponseBase<FilesCreateHeaders, Void>> operation = () ->
        this.azureFileStorageClient.getFiles().createWithResponse(shareName, filePath, maxSize, fileAttributes,
            null, metadata, finalFilePermission, filePermissionKey, fileCreationTime, fileLastWriteTime,
            fileChangeTime, finalRequestConditions.getLeaseId(),
            httpHeaders, finalContext);
    ResponseBase<FilesCreateHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
    return ModelHelper.createFileInfoResponse(response);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
* Collections.singletonMap&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
*
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
* naming rules.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata,
    Duration pollInterval) {
    // Only metadata is configurable on this overload; everything else uses option defaults.
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions();
    copyOptions.setMetadata(metadata);
    return beginCopy(sourceUrl, copyOptions, pollInterval);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* boolean ignoreReadOnly = false; &
* boolean setArchiveAttribute = true; &
* ShareRequestConditions requestConditions = new ShareRequestConditions&
*
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
* PermissionCopyModeType.SOURCE, ignoreReadOnly, setArchiveAttribute,
* Collections.singletonMap&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file.
* @param filePermissionCopyMode Mode of file permission acquisition.
* @param ignoreReadOnly Whether to copy despite target being read only. (default is false)
* @param setArchiveAttribute Whether the archive attribute is to be set on the target. (default is true)
* @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
* naming rules.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @param destinationRequestConditions {@link ShareRequestConditions}
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, FileSmbProperties smbProperties,
    String filePermission, PermissionCopyModeType filePermissionCopyMode, Boolean ignoreReadOnly,
    Boolean setArchiveAttribute, Map<String, String> metadata, Duration pollInterval,
    ShareRequestConditions destinationRequestConditions) {
    // Bundle the individual arguments into a ShareFileCopyOptions and delegate.
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions();
    copyOptions.setSmbProperties(smbProperties);
    copyOptions.setFilePermission(filePermission);
    copyOptions.setPermissionCopyModeType(filePermissionCopyMode);
    copyOptions.setIgnoreReadOnly(ignoreReadOnly);
    copyOptions.setArchiveAttribute(setArchiveAttribute);
    copyOptions.setMetadata(metadata);
    copyOptions.setDestinationRequestConditions(destinationRequestConditions);
    return beginCopy(sourceUrl, copyOptions, pollInterval);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* boolean ignoreReadOnly = false; &
* boolean setArchiveAttribute = true; &
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList&
* &
*
* ShareFileCopyOptions options = new ShareFileCopyOptions&
* .setSmbProperties&
* .setFilePermission&
* .setIgnoreReadOnly&
* .setArchiveAttribute&
* .setDestinationRequestConditions&
* .setSmbPropertiesToCopy&
* .setPermissionCopyModeType&
* .setMetadata&
*
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @param options {@link ShareFileCopyOptions}
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, ShareFileCopyOptions options, Duration pollInterval) {
    // Default the destination request conditions when none were supplied.
    final ShareRequestConditions finalRequestConditions =
        options.getDestinationRequestConditions() == null ? new ShareRequestConditions()
            : options.getDestinationRequestConditions();
    // Holds the copy id produced at activation so the cancel operation can reference it.
    final AtomicReference<String> copyId = new AtomicReference<>();
    // Poll once per second unless the caller chose a different cadence.
    final Duration interval = pollInterval == null ? Duration.ofSeconds(1) : pollInterval;
    FileSmbProperties tempSmbProperties = options.getSmbProperties() == null ? new FileSmbProperties()
        : options.getSmbProperties();
    String filePermissionKey = tempSmbProperties.getFilePermissionKey();
    // A file permission or permission key may only accompany the OVERRIDE copy mode; with
    // SOURCE (or an unset) mode their presence is rejected up front.
    if (options.getFilePermission() == null || options.getPermissionCopyModeType() == PermissionCopyModeType.SOURCE) {
        if ((options.getFilePermission() != null || filePermissionKey != null)
            && options.getPermissionCopyModeType() != PermissionCopyModeType.OVERRIDE) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("File permission and file permission " +
                "key can not be set when PermissionCopyModeType is source or null"));
        }
    } else if (options.getPermissionCopyModeType() == PermissionCopyModeType.OVERRIDE) {
        // OVERRIDE mode: validate the permission/permission-key combination (rules in ModelHelper).
        try {
            ModelHelper.validateFilePermissionAndKey(options.getFilePermission(),
                tempSmbProperties.getFilePermissionKey());
        } catch (RuntimeException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    }
    CopyableFileSmbPropertiesList list = options.getSmbPropertiesToCopy() == null
        ? new CopyableFileSmbPropertiesList() : options.getSmbPropertiesToCopy();
    try {
        ModelHelper.validateCopyFlagAndSmbProperties(options, tempSmbProperties);
    } catch (RuntimeException ex) {
        throw LOGGER.logExceptionAsError(ex);
    }
    // For each SMB property, either instruct the service to copy it from the source
    // (COPY_SOURCE sentinel) or send the explicitly configured value.
    String fileAttributes = list.isFileAttributes() ? FileConstants.COPY_SOURCE : NtfsFileAttributes.toString(tempSmbProperties.getNtfsFileAttributes());
    String fileCreationTime = list.isCreatedOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileCreationTime());
    String fileLastWriteTime = list.isLastWrittenOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileLastWriteTime());
    String fileChangedOnTime = list.isChangedOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileChangeTime());
    final CopyFileSmbInfo copyFileSmbInfo = new CopyFileSmbInfo()
        .setFilePermissionCopyMode(options.getPermissionCopyModeType())
        .setFileAttributes(fileAttributes)
        .setFileCreationTime(fileCreationTime)
        .setFileLastWriteTime(fileLastWriteTime)
        .setFileChangeTime(fileChangedOnTime)
        .setIgnoreReadOnly(options.isIgnoreReadOnly())
        .setSetArchiveAttribute(options.isArchiveAttributeSet());
    final String copySource = Utility.encodeUrlPath(sourceUrl);
    // Activation: start the copy and record the copy id from the response headers.
    Function<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>> syncActivationOperation =
        (pollingContext) -> {
            ResponseBase<FilesStartCopyHeaders, Void> response = azureFileStorageClient.getFiles()
                .startCopyWithResponse(shareName, filePath, copySource, null,
                    options.getMetadata(), options.getFilePermission(), tempSmbProperties.getFilePermissionKey(),
                    finalRequestConditions.getLeaseId(), copyFileSmbInfo, null);
            FilesStartCopyHeaders headers = response.getDeserializedHeaders();
            copyId.set(headers.getXMsCopyId());
            return new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, new ShareFileCopyInfo(
                sourceUrl,
                headers.getXMsCopyId(),
                headers.getXMsCopyStatus(),
                headers.getETag(),
                headers.getLastModified(),
                response.getHeaders().getValue(HttpHeaderName.fromString("x-ms-error-code"))));
        };
    // Polling: re-read the destination file's properties to observe the copy status (see onPoll).
    Function<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>> pollOperation = (pollingContext) ->
        onPoll(pollingContext.getLatestResponse(), finalRequestConditions);
    // Cancellation: abort the copy using the id captured from the activation response.
    BiFunction<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>, ShareFileCopyInfo> cancelOperation =
        (pollingContext, firstResponse) -> {
            if (firstResponse == null || firstResponse.getValue() == null) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("Cannot cancel a poll response that never started."));
            }
            final String copyIdentifier = firstResponse.getValue().getCopyId();
            if (!CoreUtils.isNullOrEmpty(copyIdentifier)) {
                LOGGER.info("Cancelling copy operation for copy id: {}", copyIdentifier);
                abortCopyWithResponse(copyIdentifier, finalRequestConditions, null, null);
                return firstResponse.getValue();
            }
            return null;
        };
    // The poller carries no final result; fetching it always yields null.
    Function<PollingContext<ShareFileCopyInfo>, Void> fetchResultOperation = (pollingContext) -> null;
    return SyncPoller.createPoller(interval, syncActivationOperation, pollOperation, cancelOperation, fetchResultOperation);
}
private PollResponse<ShareFileCopyInfo> onPoll(PollResponse<ShareFileCopyInfo> pollResponse,
    ShareRequestConditions requestConditions) {
    // Terminal states need no further polling.
    LongRunningOperationStatus currentStatus = pollResponse.getStatus();
    if (currentStatus == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
        || currentStatus == LongRunningOperationStatus.FAILED) {
        return pollResponse;
    }
    final ShareFileCopyInfo previousInfo = pollResponse.getValue();
    if (previousInfo == null) {
        // The activation never produced copy info, so polling cannot proceed.
        LOGGER.warning("ShareFileCopyInfo does not exist. Activation operation failed.");
        return new PollResponse<>(LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null);
    }
    try {
        // The copy status is surfaced on the destination file's properties.
        ShareFileProperties properties = getPropertiesWithResponse(requestConditions, null, null).getValue();
        CopyStatusType copyStatus = properties.getCopyStatus();
        ShareFileCopyInfo latestInfo = new ShareFileCopyInfo(properties.getCopySource(), properties.getCopyId(),
            copyStatus, properties.getETag(), properties.getCopyCompletionTime(),
            properties.getCopyStatusDescription());
        return new PollResponse<>(ModelHelper.mapStatusToLongRunningOperationStatus(copyStatus), latestInfo);
    } catch (RuntimeException e) {
        // Keep the last known info, but flag the poll itself as failed.
        return new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), previousInfo);
    }
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopy
* <pre>
* fileClient.abortCopy&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId Specifies the copy id which has copying pending status associate with it.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopy(String copyId) {
    // Delegate to the with-response overload, discarding the returned response.
    abortCopyWithResponse(copyId, null, Context.NONE);
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
* <pre>
* Response<Void> response = fileClient.abortCopyWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId Specifies the copy id which has copying pending status associate with it.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the status of aborting copy the file.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyWithResponse(String copyId, Duration timeout, Context context) {
    // Delegate to the overload that also accepts request conditions, passing none.
    return this.abortCopyWithResponse(copyId, null, timeout, context);
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Void> response = fileClient.abortCopyWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId Specifies the copy id which has copying pending status associate with it.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the status of aborting copy the file.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyWithResponse(String copyId, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Normalize the optional arguments before handing off to the generated client.
    Context finalContext;
    if (context == null) {
        finalContext = Context.NONE;
    } else {
        finalContext = context;
    }
    ShareRequestConditions finalRequestConditions;
    if (requestConditions == null) {
        finalRequestConditions = new ShareRequestConditions();
    } else {
        finalRequestConditions = requestConditions;
    }
    Callable<ResponseBase<FilesAbortCopyHeaders, Void>> operation = () ->
        azureFileStorageClient.getFiles().abortCopyWithResponse(shareName, filePath, copyId, null,
            finalRequestConditions.getLeaseId(), finalContext);
    // sendRequest applies the optional timeout around the service call.
    return StorageImplUtils.sendRequest(operation, timeout);
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFile
* <pre>
* fileClient.downloadToFile&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @return The properties of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileProperties downloadToFile(String downloadFilePath) {
    // Download the whole file with no range, timeout, or request conditions.
    Response<ShareFileProperties> response =
        downloadToFileWithResponse(downloadFilePath, null, null, Context.NONE);
    return response.getValue();
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
* <pre>
* Response<ShareFileProperties> response =
* fileClient.downloadToFileWithResponse&
* Duration.ofSeconds&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @param range Optional byte range which returns file data only from the specified range.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The response of the file properties.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
    Duration timeout, Context context) {
    // Delegate to the overload that also accepts request conditions, passing none.
    return this.downloadToFileWithResponse(downloadFilePath, range, null, timeout, context);
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileProperties> response =
* fileClient.downloadToFileWithResponse&
* requestConditions, Duration.ofSeconds&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @param range Optional byte range which returns file data only from the specified range.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The response of the file properties.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Delegate to the async client, then block; the wait is bounded by the optional timeout.
    Mono<Response<ShareFileProperties>> download = shareFileAsyncClient.downloadToFileWithResponse(
        downloadFilePath, range, requestConditions, context);
    return StorageImplUtils.blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file with its metadata and properties. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.download
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* fileClient.download&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @throws NullPointerException If {@code stream} is {@code null}.
*/
public void download(OutputStream stream) {
    // Download the entire file with default range/MD5/conditions, discarding the response details.
    downloadWithResponse(stream, null, null, null, Context.NONE);
}
/**
 * Downloads a file from the system, including its metadata and properties.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
 * @param range Optional byte range which returns file data only from the specified range.
 * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
 * true, as long as the range is less than or equal to 4 MB in size.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the headers and response status code.
 * @throws NullPointerException If {@code stream} is {@code null}.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
    Boolean rangeGetContentMD5, Duration timeout, Context context) {
    // Delegate to the lease-aware overload with no request conditions.
    return this.downloadWithResponse(stream, range, rangeGetContentMD5, null, timeout, context);
}
/**
 * Downloads a file from the system, including its metadata and properties.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
 * @param range Optional byte range which returns file data only from the specified range.
 * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
 * true, as long as the range is less than or equal to 4 MB in size.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the headers and response status code.
 * @throws NullPointerException If {@code stream} is {@code null}.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
    Boolean rangeGetContentMD5, ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Bundle the individual arguments into a ShareFileDownloadOptions and delegate.
    return downloadWithResponse(stream, new ShareFileDownloadOptions().setRange(range)
        .setRangeContentMd5Requested(rangeGetContentMD5).setRequestConditions(requestConditions), timeout, context);
}
/**
 * Downloads a file from the system, including its metadata and properties.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-file">Azure Docs</a>.</p>
 *
 * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
 * @param options {@link ShareFileDownloadOptions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the headers and response status code.
 * @throws NullPointerException If {@code stream} is {@code null}.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileDownloadOptions options,
    Duration timeout, Context context) {
    Objects.requireNonNull(stream, "'stream' cannot be null.");
    // Run the async download, stream the body into the caller's OutputStream, then surface the
    // headers/status via ShareFileDownloadResponse. Block with the optional timeout at the end.
    Mono<ShareFileDownloadResponse> downloadOperation = shareFileAsyncClient
        .downloadWithResponse(options, context)
        .flatMap(asyncResponse -> FluxUtil.writeToOutputStream(asyncResponse.getValue(), stream)
            .thenReturn(new ShareFileDownloadResponse(asyncResponse)));
    return StorageImplUtils.blockWithOptionalTimeout(downloadOperation, timeout);
}
/**
 * Deletes the file associated with the client.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
    // Delegate with no timeout and an empty context; the raw response is intentionally discarded.
    deleteWithResponse(null, Context.NONE);
}
/**
 * Deletes the file associated with the client.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response that only contains headers and response status code.
 * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(Duration timeout, Context context) {
    // Delegate to the lease-aware overload with no request conditions.
    return this.deleteWithResponse(null, timeout, context);
}
/**
 * Deletes the file associated with the client.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response that only contains headers and response status code.
 * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(ShareRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Normalize nullable arguments before capturing them in the lambda (must be effectively final).
    ShareRequestConditions conditions =
        requestConditions == null ? new ShareRequestConditions() : requestConditions;
    Context pipelineContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<FilesDeleteHeaders, Void>> operation = () -> this.azureFileStorageClient.getFiles()
        .deleteWithResponse(shareName, filePath, null, conditions.getLeaseId(), pipelineContext);
    return StorageImplUtils.sendRequest(operation, timeout);
}
/**
 * Deletes the file associated with the client if it exists.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @return {@code true} if the file is successfully deleted, {@code false} if the file does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
    // Delegate with no conditions/timeout; unwrap the boolean "existed and was deleted" flag.
    return deleteIfExistsWithResponse(null, null, Context.NONE).getValue();
}
/**
 * Deletes the file associated with the client if it exists.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-file2">Azure Docs</a>.</p>
 *
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers. If {@link Response}'s status code is 202, the file
 * was successfully deleted. If status code is 404, the file does not exist.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(ShareRequestConditions requestConditions, Duration timeout,
    Context context) {
    try {
        Response<Void> response = this.deleteWithResponse(requestConditions, timeout, context);
        return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), true);
    } catch (ShareStorageException e) {
        // A 404 with RESOURCE_NOT_FOUND means the file was already absent: report 'false' rather
        // than failing. Constant-first equals avoids an NPE when the exception carries no error code.
        if (e.getStatusCode() == 404 && ShareErrorCode.RESOURCE_NOT_FOUND.equals(e.getErrorCode())) {
            HttpResponse res = e.getResponse();
            return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
        } else {
            throw LOGGER.logExceptionAsError(e);
        }
    }
}
/**
 * Retrieves the properties of the storage account's file. The properties include file metadata, last modified
 * date, is server encrypted, and eTag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-file-properties">Azure Docs</a>.</p>
 *
 * @return {@link ShareFileProperties Storage file properties}
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileProperties getProperties() {
    // Delegate with no timeout and an empty context; unwrap the properties value.
    return getPropertiesWithResponse(null, Context.NONE).getValue();
}
/**
 * Retrieves the properties of the storage account's file. The properties include file metadata, last modified
 * date, is server encrypted, and eTag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-file-properties">Azure Docs</a>.</p>
 *
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
 * status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> getPropertiesWithResponse(Duration timeout, Context context) {
    // Delegate to the lease-aware overload with no request conditions.
    return this.getPropertiesWithResponse(null, timeout, context);
}
/**
 * Retrieves the properties of the storage account's file. The properties include file metadata, last modified
 * date, is server encrypted, and eTag.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-file-properties">Azure Docs</a>.</p>
 *
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
 * status code.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> getPropertiesWithResponse(ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Normalize nullable arguments before capturing them in the lambda.
    ShareRequestConditions conditions =
        requestConditions == null ? new ShareRequestConditions() : requestConditions;
    Context pipelineContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<FilesGetPropertiesHeaders, Void>> operation = () -> this.azureFileStorageClient
        .getFiles().getPropertiesWithResponse(shareName, filePath, snapshot, null, conditions.getLeaseId(),
            pipelineContext);
    // Convert the generated-layer headers into the public ShareFileProperties response.
    return ModelHelper.getPropertiesResponse(StorageImplUtils.sendRequest(operation, timeout));
}
/**
 * Sets the user-defined httpHeaders to associate to the file.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-properties">Azure Docs</a>.</p>
 *
 * @param newFileSize New file size of the file.
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file.
 * @return The {@link ShareFileInfo file info}
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission) {
    // Delegate with no timeout and an empty context; unwrap the file info value.
    return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, Context.NONE)
        .getValue();
}
/**
 * Sets the user-defined httpHeaders to associate to the file.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-properties">Azure Docs</a>.</p>
 *
 * @param newFileSize New file size of the file.
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Response containing the {@link ShareFileInfo file info} with headers and status code.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Duration timeout, Context context) {
    // Delegate to the lease-aware overload with no request conditions.
    return this.setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null,
        timeout, context);
}
/**
 * Sets the user-defined httpHeaders to associate to the file.
 *
 * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-properties">Azure Docs</a>.</p>
 *
 * @param newFileSize New file size of the file.
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Response containing the {@link ShareFileInfo file info} with headers and status code.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Normalize nullable arguments; avoid reassigning parameters.
    Context pipelineContext = context == null ? Context.NONE : context;
    ShareRequestConditions conditions =
        requestConditions == null ? new ShareRequestConditions() : requestConditions;
    FileSmbProperties smb = smbProperties == null ? new FileSmbProperties() : smbProperties;
    // Permission and permission key are mutually exclusive; validate before applying defaults.
    ModelHelper.validateFilePermissionAndKey(filePermission, smb.getFilePermissionKey());
    // Order matters: setFilePermission must run before the permission key is read back.
    String permission = smb.setFilePermission(filePermission, FileConstants.PRESERVE);
    String permissionKey = smb.getFilePermissionKey();
    String ntfsAttributes = smb.setNtfsFileAttributes(FileConstants.PRESERVE);
    String creationTime = smb.setFileCreationTime(FileConstants.PRESERVE);
    String lastWriteTime = smb.setFileLastWriteTime(FileConstants.PRESERVE);
    String changeTime = smb.getFileChangeTimeString();
    Callable<ResponseBase<FilesSetHttpHeadersHeaders, Void>> operation = () -> this.azureFileStorageClient
        .getFiles().setHttpHeadersWithResponse(shareName, filePath, ntfsAttributes, null, newFileSize,
            permission, permissionKey, creationTime, lastWriteTime, changeTime, conditions.getLeaseId(),
            httpHeaders, pipelineContext);
    // Convert the generated-layer headers into the public ShareFileInfo response.
    return ModelHelper.setPropertiesResponse(StorageImplUtils.sendRequest(operation, timeout));
}
/**
 * Sets the user-defined metadata to associate to the file.
 *
 * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-metadata">Azure Docs</a>.</p>
 *
 * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared.
 * @return The {@link ShareFileMetadataInfo file meta info}
 * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileMetadataInfo setMetadata(Map<String, String> metadata) {
    // Delegate with no conditions/timeout; unwrap the metadata info value.
    return setMetadataWithResponse(metadata, null, Context.NONE).getValue();
}
/**
 * Sets the user-defined metadata to associate to the file.
 *
 * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-metadata">Azure Docs</a>.</p>
 *
 * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code.
 * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata, Duration timeout,
    Context context) {
    // Delegate to the lease-aware overload with no request conditions.
    return this.setMetadataWithResponse(metadata, null, timeout, context);
}
/**
 * Sets the user-defined metadata to associate to the file.
 *
 * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-metadata">Azure Docs</a>.</p>
 *
 * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code.
 * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Normalize nullable arguments before capturing them in the lambda.
    ShareRequestConditions conditions =
        requestConditions == null ? new ShareRequestConditions() : requestConditions;
    Context pipelineContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<FilesSetMetadataHeaders, Void>> operation = () -> this.azureFileStorageClient
        .getFiles().setMetadataWithResponse(shareName, filePath, null, metadata, conditions.getLeaseId(),
            pipelineContext);
    // Convert the generated-layer headers into the public ShareFileMetadataInfo response.
    return ModelHelper.setMetadataResponse(StorageImplUtils.sendRequest(operation, timeout));
}
/**
 * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/put-range">Azure Docs</a>.</p>
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
 * equal to 1.
 * @return The {@link ShareFileUploadInfo file upload info}
 * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
 * status code 413 (Request Entity Too Large)
 *
 * @deprecated Use {@link ShareFileClient#uploadRange(InputStream, long)} instead. Or consider
 * {@link ShareFileClient#upload(InputStream, long, ParallelTransferOptions)} for an upload that can handle
 * large amounts of data.
 */
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo upload(InputStream data, long length) {
    // Offset 0L: writes at the beginning of the file.
    return uploadWithResponse(data, length, 0L, null, Context.NONE).getValue();
}
/**
 * Uploads a range of bytes to specific offset of a file in storage file service. Upload operations performs an
 * in-place write on the specified file.
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/put-range">Azure Docs</a>.</p>
 *
 * @param data The data which will upload to the storage file.
 * @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
 * equal to 1.
 * @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
 * status code.
 * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
 * status code 413 (Request Entity Too Large)
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 *
 * @deprecated Use {@link ShareFileClient#uploadRangeWithResponse(ShareFileUploadRangeOptions, Duration, Context)}
 * instead. Or consider {@link ShareFileClient#uploadWithResponse(ShareFileUploadOptions, Duration, Context)} for
 * an upload that can handle large amounts of data.
 */
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset,
    Duration timeout, Context context) {
    // Delegate to the lease-aware overload with no request conditions.
    return this.uploadWithResponse(data, length, offset, null, timeout, context);
}
/**
* Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" starting from 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse&
* requestConditions, Duration.ofSeconds&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*
* @deprecated Use {@link ShareFileClient
* instead. Or consider {@link ShareFileClient
* an upload that can handle large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset,
ShareRequestConditions requestConditions, Duration timeout, Context context) {
return this.uploadRangeWithResponse(
new ShareFileUploadRangeOptions(data, length).setOffset(offset).setRequestConditions(requestConditions),
timeout, context);
}
/**
* Buffers a range of bytes and uploads sub-ranges in parallel to a file in storage file service. Upload operations
* perform an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.upload
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = shareFileClient.upload&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.upload
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param transferOptions {@link ParallelTransferOptions} for file transfer.
* @return The {@link ShareFileUploadInfo file upload info}
*/
public ShareFileUploadInfo upload(InputStream data, long length, ParallelTransferOptions transferOptions) {
return uploadWithResponse(new ShareFileUploadOptions(data, length).setParallelTransferOptions(transferOptions),
null, Context.NONE).getValue();
}
/**
* Buffers a range of bytes and uploads sub-ranges in parallel to a file in storage file service. Upload operations
* perform an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = shareFileAsyncClient.uploadWithResponse&
* new ShareFileUploadOptions&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options Argument collection for the upload operation.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link ShareFileUploadInfo file upload info}
*/
public Response<ShareFileUploadInfo> uploadWithResponse(ShareFileUploadOptions options,
Duration timeout, Context context) {
return StorageImplUtils.blockWithOptionalTimeout(
shareFileAsyncClient.uploadWithResponse(options, context), timeout);
}
/**
* Uploads a range of bytes to the specified offset of a file in storage file service. Upload operations perform an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRange
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = shareFileClient.uploadRange&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRange
*
* <p>This method does a single Put Range operation. For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*/
public ShareFileUploadInfo uploadRange(InputStream data, long length) {
return this.uploadRangeWithResponse(new ShareFileUploadRangeOptions(data, length), null, Context.NONE).getValue();
}
/**
* Uploads a range of bytes to the specified offset of a file in storage file service. Upload operations perform an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = shareFileClient.uploadRangeWithResponse&
* new ShareFileUploadRangeOptions&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeWithResponse
*
* <p>This method does a single Put Range operation. For more information, see the
* <a href="https:
*
* @param options Argument collection for the upload operation.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*/
public Response<ShareFileUploadInfo> uploadRangeWithResponse(ShareFileUploadRangeOptions options,
Duration timeout, Context context) {
/**
* One-shot upload range.
*/
ShareRequestConditions requestConditions = options.getRequestConditions() == null
? new ShareRequestConditions() : options.getRequestConditions();
long rangeOffset = (options.getOffset() == null) ? 0L : options.getOffset();
ShareFileRange range = new ShareFileRange(rangeOffset, rangeOffset + options.getLength() - 1);
Context finalContext = context == null ? Context.NONE : context;
BinaryData binaryData = options.getDataStream() != null ? BinaryData.fromStream(options.getDataStream())
: BinaryData.fromFlux(options.getDataFlux(), options.getLength()).block();
Callable<ResponseBase<FilesUploadRangeHeaders, Void>> operation = () -> azureFileStorageClient.getFiles()
.uploadRangeWithResponse(shareName, filePath, range.toString(), ShareFileRangeWriteType.UPDATE,
options.getLength(), null, null, requestConditions.getLeaseId(), options.getLastWrittenMode(),
binaryData, finalContext);
ResponseBase<FilesUploadRangeHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
return ModelHelper.uploadRangeHeadersToShareFileInfo(response);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl
* <pre>
* ShareFileUploadRangeFromUrlInfo response = fileClient.uploadRangeFromUrl&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @return The {@link ShareFileUploadRangeFromUrlInfo file upload range from url info}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long destinationOffset, long sourceOffset,
String sourceUrl) {
return uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, Context.NONE)
.getValue();
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* Response<ShareFileUploadRangeFromUrlInfo> response =
* fileClient.uploadRangeFromUrlWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset,
long sourceOffset, String sourceUrl, Duration timeout, Context context) {
return this.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, timeout,
context);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadRangeFromUrlInfo> response = fileClient.uploadRangeFromUrlWithResponse&
* "sourceUrl", requestConditions, Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset,
long sourceOffset, String sourceUrl, ShareRequestConditions requestConditions, Duration timeout,
Context context) {
return this.uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(length, sourceUrl)
.setDestinationOffset(destinationOffset).setSourceOffset(sourceOffset)
.setDestinationRequestConditions(requestConditions), timeout, context);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* Response<ShareFileUploadRangeFromUrlInfo> response =
* fileClient.uploadRangeFromUrlWithResponse&
* .setDestinationOffset&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options argument collection
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(
ShareFileUploadRangeFromUrlOptions options, Duration timeout, Context context) {
ShareRequestConditions finalRequestConditions = options.getDestinationRequestConditions() == null
? new ShareRequestConditions() : options.getDestinationRequestConditions();
ShareFileRange destinationRange = new ShareFileRange(options.getDestinationOffset(),
options.getDestinationOffset() + options.getLength() - 1);
ShareFileRange sourceRange = new ShareFileRange(options.getSourceOffset(),
options.getSourceOffset() + options.getLength() - 1);
Context finalContext = context == null ? Context.NONE : context;
String sourceAuth = options.getSourceAuthorization() == null
? null : options.getSourceAuthorization().toString();
String copySource = Utility.encodeUrlPath(options.getSourceUrl());
Callable<ResponseBase<FilesUploadRangeFromURLHeaders, Void>> operation = () ->
this.azureFileStorageClient.getFiles()
.uploadRangeFromURLWithResponse(shareName, filePath, destinationRange.toString(), copySource, 0,
null, sourceRange.toString(), null, finalRequestConditions.getLeaseId(), sourceAuth,
options.getLastWrittenMode(), null, finalContext);
ResponseBase<FilesUploadRangeFromURLHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
return ModelHelper.mapUploadRangeFromUrlResponse(response);
}
/**
* Clears a range of bytes to specific of a file in storage file service. Clear operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clears the first 1024 bytes. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRange
* <pre>
* ShareFileUploadInfo response = fileClient.clearRange&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRange
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being cleared.
* @return The {@link ShareFileUploadInfo file upload info}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo clearRange(long length) {
return clearRangeWithResponse(length, 0, null, Context.NONE).getValue();
}
/**
* Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clear the range starting from 1024 with length of 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
* <pre>
* Response<ShareFileUploadInfo> response = fileClient.clearRangeWithResponse&
* Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset, Duration timeout,
Context context) {
return this.clearRangeWithResponse(length, offset, null, timeout, context);
}
/**
* Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clear the range starting from 1024 with length of 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadInfo> response = fileClient.clearRangeWithResponse&
* Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset,
ShareRequestConditions requestConditions, Duration timeout, Context context) {
ShareRequestConditions finalRequestConditions = requestConditions == null
? new ShareRequestConditions() : requestConditions;
ShareFileRange range = new ShareFileRange(offset, offset + length - 1);
Context finalContext = context == null ? Context.NONE : context;
Callable<ResponseBase<FilesUploadRangeHeaders, Void>> operation = () ->
this.azureFileStorageClient.getFiles().uploadRangeWithResponse(shareName, filePath, range.toString(),
ShareFileRangeWriteType.CLEAR, 0L, null, null, finalRequestConditions.getLeaseId(), null, null,
finalContext);
ResponseBase<FilesUploadRangeHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
return ModelHelper.transformUploadResponse(response);
}
/**
* Uploads file to storage file service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload the file from the source file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadFromFile
* <pre>
* fileClient.uploadFromFile&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadFromFile
*
* <p>For more information, see the
* <a href="https:
* and
* <a href="https:
*
* @param uploadFilePath The path where store the source file to upload
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String uploadFilePath) {
this.uploadFromFile(uploadFilePath, null);
}
/**
* Uploads file to storage file service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload the file from the source file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadFromFile
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* fileClient.uploadFromFile&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadFromFile
*
* <p>For more information, see the
* <a href="https:
* and
* <a href="https:
*
* @param uploadFilePath The path where store the source file to upload
* @param requestConditions {@link ShareRequestConditions}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges for the file client.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges -->
* <pre>
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link ShareFileRange ranges} in the files.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges() {
return listRanges((ShareFileRange) null, null, null);
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges
* <pre>
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* new Context&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges
*
* <p>For more information, see the
* <a href="https:
*
* @param range Optional byte range which returns file data only from the specified range.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, Duration timeout, Context context) {
return this.listRanges(range, null, timeout, context);
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* Duration.ofSeconds&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges
*
* <p>For more information, see the
* <a href="https:
*
* @param range Optional byte range which returns file data only from the specified range.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, ShareRequestConditions requestConditions,
Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareRequestConditions finalRequestConditions = requestConditions == null
? new ShareRequestConditions() : requestConditions;
String rangeString = range == null ? null : range.toString();
try {
Supplier<ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList>> operation = () ->
this.azureFileStorageClient.getFiles().getRangeListWithResponse(shareName, filePath, snapshot,
null, null, rangeString, finalRequestConditions.getLeaseId(), null, finalContext);
ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList> response = timeout != null
? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
List<ShareFileRange> shareFileRangeList =
response.getValue().getRanges().stream()
.map(r -> new Range().setStart(r.getStart()).setEnd(r.getEnd()))
.map(ShareFileRange::new).collect(Collectors.toList());
Supplier<PagedResponse<ShareFileRange>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
response.getStatusCode(), response.getHeaders(), shareFileRangeList, null,
response.getDeserializedHeaders());
return new PagedIterable<>(finalResponse);
} catch (RuntimeException e) {
throw LOGGER.logExceptionAsError(e);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw LOGGER.logExceptionAsError(new RuntimeException(e));
}
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRangesDiff
* <pre>
* ShareFileRangeList rangeList = fileClient.listRangesDiff&
* System.out.println&
* for &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRangesDiff
*
* <p>For more information, see the
* <a href="https:
*
* @param previousSnapshot Specifies that the response will contain only ranges that were changed between target
* file and previous snapshot. Changed ranges include both updated and cleared ranges. The target file may be a
* snapshot, as long as the snapshot specified by previousSnapshot is the older of the two.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileRangeList listRangesDiff(String previousSnapshot) {
return this.listRangesDiffWithResponse(new ShareFileListRangesDiffOptions(previousSnapshot), null, Context.NONE)
.getValue();
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRangesDiffWithResponse
* <pre>
* ShareFileRangeList rangeList = fileClient.listRangesDiffWithResponse&
* new ShareFileListRangesDiffOptions&
* .setRange&
* System.out.println&
* for &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRangesDiffWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ShareFileListRangesDiffOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileRangeList> listRangesDiffWithResponse(ShareFileListRangesDiffOptions options,
Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareRequestConditions requestConditions = options.getRequestConditions() == null
? new ShareRequestConditions() : options.getRequestConditions();
String rangeString = options.getRange() == null ? null : options.getRange().toString();
Callable<ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList>> operation = () ->
this.azureFileStorageClient.getFiles().getRangeListWithResponse(shareName, filePath, snapshot,
options.getPreviousSnapshot(), null, rangeString, requestConditions.getLeaseId(), null, finalContext);
return StorageImplUtils.sendRequest(operation, timeout);
}
/**
* List of open handles on a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all handles for the file client.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listHandles -->
* <pre>
* fileClient.listHandles&
* .forEach&
* handleItem.getHandleId&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listHandles -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link HandleItem handles} in the files that satisfy the requirements
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<HandleItem> listHandles() {
return listHandles(null, null, Context.NONE);
}
/**
 * Lists open handles on this file.
 *
 * <p>For more information, see the Azure Docs for the List Handles operation.</p>
 *
 * @param maxResultsPerPage Optional max number of results returned per page.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return {@link HandleItem handles} in the file that satisfy the requirements
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<HandleItem> listHandles(Integer maxResultsPerPage, Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    try {
        Supplier<ResponseBase<FilesListHandlesHeaders, ListHandlesResponse>> operation = () ->
            this.azureFileStorageClient.getFiles().listHandlesWithResponse(shareName, filePath, null,
                maxResultsPerPage, null, snapshot, finalContext);
        // With a timeout the call is submitted to the shared thread pool so it can be bounded by Future.get;
        // without one it runs synchronously on the calling thread.
        ResponseBase<FilesListHandlesHeaders, ListHandlesResponse> response = timeout != null
            ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
        // The already-fetched page is wrapped in a single-page PagedIterable (continuation token is null).
        Supplier<PagedResponse<HandleItem>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
            response.getStatusCode(),
            response.getHeaders(),
            ModelHelper.transformHandleItems(response.getValue().getHandleList()),
            null,
            response.getDeserializedHeaders());
        return new PagedIterable<>(finalResponse);
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status before wrapping so callers up the stack can observe it.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException | TimeoutException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Closes a single handle on the file at the service. Handle IDs are obtained from
 * {@code listHandles}.
 *
 * <p>For more information, see the Azure Docs for the Force Close Handles operation.</p>
 *
 * @param handleId Handle ID to be closed.
 * @return Information about the closed handles.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public CloseHandlesInfo forceCloseHandle(String handleId) {
    Response<CloseHandlesInfo> response = forceCloseHandleWithResponse(handleId, null, Context.NONE);
    return response.getValue();
}
/**
 * Closes a single handle on the file at the service. Handle IDs are obtained from
 * {@code listHandles}.
 *
 * <p>For more information, see the Azure Docs for the Force Close Handles operation.</p>
 *
 * @param handleId Handle ID to be closed.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response that contains information about the closed handles, headers and response status code.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<CloseHandlesInfo> forceCloseHandleWithResponse(String handleId, Duration timeout, Context context) {
    Context requestContext = (context == null) ? Context.NONE : context;
    Callable<ResponseBase<FilesForceCloseHandlesHeaders, Void>> closeCall = () ->
        this.azureFileStorageClient.getFiles().forceCloseHandlesWithResponse(shareName, filePath, handleId,
            null, null, snapshot, requestContext);
    ResponseBase<FilesForceCloseHandlesHeaders, Void> rawResponse =
        StorageImplUtils.sendRequest(closeCall, timeout);
    // Closed/failed counts are reported via response headers, not the body.
    FilesForceCloseHandlesHeaders closeHeaders = rawResponse.getDeserializedHeaders();
    CloseHandlesInfo closeInfo = new CloseHandlesInfo(closeHeaders.getXMsNumberOfHandlesClosed(),
        closeHeaders.getXMsNumberOfHandlesFailed());
    return new SimpleResponse<>(rawResponse, closeInfo);
}
/**
 * Closes all handles opened on the file at the service, paging through the service's markers and
 * summing the closed/failed counts across pages.
 *
 * <p>For more information, see the Azure Docs for the Force Close Handles operation.</p>
 *
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Information about the closed handles
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    try {
        // "*" instructs the service to close every handle on the file.
        Supplier<ResponseBase<FilesForceCloseHandlesHeaders, Void>> operation = () ->
            this.azureFileStorageClient.getFiles().forceCloseHandlesWithResponse(shareName, filePath, "*", null,
                null, snapshot, finalContext);
        ResponseBase<FilesForceCloseHandlesHeaders, Void> response = timeout != null
            ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
        Supplier<PagedResponse<CloseHandlesInfo>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(),
            Collections.singletonList(new CloseHandlesInfo(
                response.getDeserializedHeaders().getXMsNumberOfHandlesClosed(),
                response.getDeserializedHeaders().getXMsNumberOfHandlesFailed())),
            response.getDeserializedHeaders().getXMsMarker(),
            response.getDeserializedHeaders());
        // Reduce the per-page counts into a single aggregate result.
        return new PagedIterable<>(finalResponse).stream().reduce(new CloseHandlesInfo(0, 0),
            (accu, next) -> new CloseHandlesInfo(accu.getClosedHandles() + next.getClosedHandles(),
                accu.getFailedHandles() + next.getFailedHandles()));
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status before wrapping so callers up the stack can observe it.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException | TimeoutException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
 * Moves the file to another location within the share.
 *
 * <p>For more information, see the Azure Docs for the Rename File operation.</p>
 *
 * @param destinationPath Relative path from the share to rename the file to.
 * @return A {@link ShareFileClient} used to interact with the new file created.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileClient rename(String destinationPath) {
    ShareFileRenameOptions renameOptions = new ShareFileRenameOptions(destinationPath);
    return renameWithResponse(renameOptions, null, Context.NONE).getValue();
}
/**
 * Moves the file to another location within the share.
 *
 * <p>For more information, see the Azure Docs for the Rename File operation.</p>
 *
 * @param options {@link ShareFileRenameOptions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link Response} whose value is a {@link ShareFileClient} used to interact with the renamed file.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileClient> renameWithResponse(ShareFileRenameOptions options, Duration timeout,
    Context context) {
    StorageImplUtils.assertNotNull("options", options);
    Context finalContext = context == null ? Context.NONE : context;
    // Absent request conditions default to empty ones, so getLeaseId() below safely yields null.
    ShareRequestConditions sourceRequestConditions = options.getSourceRequestConditions() == null
        ? new ShareRequestConditions() : options.getSourceRequestConditions();
    ShareRequestConditions destinationRequestConditions = options.getDestinationRequestConditions() == null
        ? new ShareRequestConditions() : options.getDestinationRequestConditions();
    SourceLeaseAccessConditions sourceConditions = new SourceLeaseAccessConditions()
        .setSourceLeaseId(sourceRequestConditions.getLeaseId());
    DestinationLeaseAccessConditions destinationConditions = new DestinationLeaseAccessConditions()
        .setDestinationLeaseId(destinationRequestConditions.getLeaseId());
    // SMB info is only sent when the caller supplied SMB properties; otherwise both it and the
    // permission key stay null and service defaults apply.
    CopyFileSmbInfo smbInfo;
    String filePermissionKey;
    if (options.getSmbProperties() != null) {
        FileSmbProperties tempSmbProperties = options.getSmbProperties();
        filePermissionKey = tempSmbProperties.getFilePermissionKey();
        // Times/attributes are converted to the wire (string) representation expected by the service.
        String fileAttributes = NtfsFileAttributes.toString(tempSmbProperties.getNtfsFileAttributes());
        String fileCreationTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileCreationTime());
        String fileLastWriteTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileLastWriteTime());
        String fileChangeTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileChangeTime());
        smbInfo = new CopyFileSmbInfo()
            .setFileAttributes(fileAttributes)
            .setFileCreationTime(fileCreationTime)
            .setFileLastWriteTime(fileLastWriteTime)
            .setFileChangeTime(fileChangeTime)
            .setIgnoreReadOnly(options.isIgnoreReadOnly());
    } else {
        smbInfo = null;
        filePermissionKey = null;
    }
    // The rename is issued against the destination path; a new client is built for it.
    ShareFileClient destinationFileClient = getFileClient(options.getDestinationPath());
    ShareFileHttpHeaders headers = options.getContentType() == null ? null
        : new ShareFileHttpHeaders().setContentType(options.getContentType());
    // If this client was authenticated with a SAS, append its signature so the service can read the source.
    String renameSource = this.sasToken != null
        ? this.getFileUrl() + "?" + this.sasToken.getSignature() : this.getFileUrl();
    Callable<ResponseBase<FilesRenameHeaders, Void>> operation = () ->
        destinationFileClient.azureFileStorageClient.getFiles().renameWithResponse(
            destinationFileClient.getShareName(), destinationFileClient.getFilePath(), renameSource,
            null /* timeout */, options.getReplaceIfExists(), options.isIgnoreReadOnly(),
            options.getFilePermission(), filePermissionKey, options.getMetadata(), sourceConditions,
            destinationConditions, smbInfo, headers, finalContext);
    ResponseBase<FilesRenameHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout);
    return new SimpleResponse<>(response, destinationFileClient);
}
/**
 * Builds a {@link ShareFileClient} for another path within the same share, reusing this client's
 * pipeline, account, service version and SAS token.
 *
 * @param destinationPath Relative path from the share of the target file.
 * @return A client pointed at {@code destinationPath}.
 * @throws IllegalArgumentException if {@code destinationPath} is null or empty.
 */
ShareFileClient getFileClient(String destinationPath) {
    if (CoreUtils.isNullOrEmpty(destinationPath)) {
        // Fix: the guard rejects both null and empty, so the message must say so.
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("'destinationPath' can not be set to null or empty."));
    }
    return new ShareFileClient(shareFileAsyncClient, this.azureFileStorageClient, getShareName(), destinationPath,
        null, this.getAccountName(), this.getServiceVersion(), this.getSasToken());
}
/**
 * Gets the snapshot id attached to this {@link ShareFileClient}, or {@code null} if none is attached.
 *
 * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base
 * share.
 */
public String getShareSnapshotId() {
    return snapshot;
}
/**
 * Gets the name of the share this file belongs to.
 *
 * @return The share name of the file.
 */
public String getShareName() {
    return this.shareName;
}
/**
 * Gets the path of the file within its share.
 *
 * @return The path of the file.
 */
public String getFilePath() {
    return this.filePath;
}
/**
 * Gets the storage account name associated with this resource.
 *
 * @return account name associated with this storage resource.
 */
public String getAccountName() {
    return accountName;
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
public HttpPipeline getHttpPipeline() {
    return this.azureFileStorageClient.getHttpPipeline();
}
// Package-private accessor for the SAS credential this client was built with (may be null).
AzureSasCredential getSasToken() {
    return this.sasToken;
}
/**
 * Generates a service SAS for the file using the specified {@link ShareServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link ShareServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues}
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatureValues) {
    // Delegate to the context-aware overload with an empty context.
    return generateSas(shareServiceSasSignatureValues, Context.NONE);
}
/**
 * Generates a service SAS for the file using the specified {@link ShareServiceSasSignatureValues}.
 * <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}.</p>
 * <p>See {@link ShareServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
 *
 * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues}
 * @param context Additional context that is passed through the code when generating a SAS.
 *
 * @return A {@code String} representing the SAS query parameters.
 */
public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatureValues, Context context) {
    // The shared-key credential is pulled from the pipeline; signing fails if none is present.
    ShareSasImplUtil sasImplUtil =
        new ShareSasImplUtil(shareServiceSasSignatureValues, getShareName(), getFilePath());
    return sasImplUtil.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} | class ShareFileClient {
// Async counterpart used to back stream-based helpers (input/output streams).
private final ShareFileAsyncClient shareFileAsyncClient;
private static final ClientLogger LOGGER = new ClientLogger(ShareFileClient.class);
// Generated protocol-layer client that issues the actual service requests.
private final AzureFileStorageImpl azureFileStorageClient;
private final String shareName;
private final String filePath;
// Share snapshot id this client is pinned to; null when targeting the live share.
private final String snapshot;
private final String accountName;
private final ShareServiceVersion serviceVersion;
// SAS credential used for authenticated source URLs (e.g. rename); may be null.
private final AzureSasCredential sasToken;
// Precomputed file URL (including sharesnapshot query when applicable).
private final String fileUrlString;
/**
 * Creates a ShareFileClient.
 * @param azureFileStorageClient Client that interacts with the service interfaces
 * @param shareName Name of the share
 * @param filePath Name of the file
 * @param snapshot The snapshot of the share
 * @param accountName Name of the account
 * @param serviceVersion The version of the service to be used when making requests.
 * @param sasToken The SAS token used to authenticate the request
 */
ShareFileClient(ShareFileAsyncClient shareFileAsyncClient, AzureFileStorageImpl azureFileStorageClient,
    String shareName, String filePath, String snapshot, String accountName, ShareServiceVersion serviceVersion,
    AzureSasCredential sasToken) {
    this.shareFileAsyncClient = shareFileAsyncClient;
    Objects.requireNonNull(shareName, "'shareName' cannot be null.");
    Objects.requireNonNull(filePath, "'filePath' cannot be null.");
    this.shareName = shareName;
    this.filePath = filePath;
    this.snapshot = snapshot;
    this.azureFileStorageClient = azureFileStorageClient;
    this.accountName = accountName;
    this.serviceVersion = serviceVersion;
    this.sasToken = sasToken;
    // Precompute the file URL once; a snapshot client carries the sharesnapshot query parameter.
    String baseUrl = azureFileStorageClient.getUrl() + "/" + shareName + "/" + filePath;
    this.fileUrlString = (snapshot == null) ? baseUrl : baseUrl + "?sharesnapshot=" + snapshot;
}
/**
 * Gets the url of the storage account.
 *
 * @return the URL of the storage account
 */
public String getAccountUrl() {
    return this.azureFileStorageClient.getUrl();
}
/**
 * Gets the url of the storage file client.
 *
 * @return the URL of the storage file client.
 */
public String getFileUrl() {
    // Precomputed in the constructor (includes the sharesnapshot query when applicable).
    return fileUrlString;
}
/**
 * Gets the service version the client is using.
 *
 * @return the service version the client is using.
 */
public ShareServiceVersion getServiceVersion() {
    return serviceVersion;
}
/**
 * Opens a file input stream to download the file.
 *
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
 * @throws ShareStorageException If a storage service error occurred.
 */
public final StorageFileInputStream openInputStream() {
    // A range starting at 0 with no end covers the whole file.
    ShareFileRange fullRange = new ShareFileRange(0);
    return openInputStream(fullRange);
}
/**
 * Opens a file input stream to download the specified range of the file.
 *
 * @param range {@link ShareFileRange}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
 * @throws ShareStorageException If a storage service error occurred.
 */
public final StorageFileInputStream openInputStream(ShareFileRange range) {
    // A null end means "read to the end of the file"; otherwise convert the inclusive end to a count.
    Long rangeEnd = range.getEnd();
    Long count = (rangeEnd == null) ? null : (rangeEnd - range.getStart() + 1);
    return new StorageFileInputStream(shareFileAsyncClient, range.getStart(), count);
}
/**
 * Creates and opens an output stream to write data to the file. If the file already exists on the service, it will
 * be overwritten.
 *
 * @return A {@link StorageFileOutputStream} object used to write data to the file.
 * @throws ShareStorageException If a storage service error occurred.
 */
public final StorageFileOutputStream getFileOutputStream() {
    // Write from the start of the file.
    return getFileOutputStream(0L);
}
/**
 * Creates and opens an output stream to write data to the file. If the file already exists on the service, it will
 * be overwritten.
 *
 * @param offset Starting point of the upload range.
 * @return A {@link StorageFileOutputStream} object used to write data to the file.
 * @throws ShareStorageException If a storage service error occurred.
 */
public final StorageFileOutputStream getFileOutputStream(long offset) {
    // The output stream delegates uploads to the async client starting at the given offset.
    return new StorageFileOutputStream(this.shareFileAsyncClient, offset);
}
/**
 * Creates and opens a {@link SeekableByteChannel} to write data to the file.
 * @param options Options for opening the channel.
 * @return The opened channel.
 */
public SeekableByteChannel getFileSeekableByteChannelWrite(ShareFileSeekableByteChannelWriteOptions options) {
    Objects.requireNonNull(options, "'options' cannot be null.");
    // Overwrite mode recreates the file at the requested size before the channel is opened.
    if (options.isOverwriteMode()) {
        Long fileSize = options.getFileSizeInBytes();
        Objects.requireNonNull(fileSize, "'options.getFileSize()' cannot return null.");
        create(fileSize);
    }
    // Fall back to the maximum put-range size when the caller did not choose a chunk size.
    Long requestedChunkSize = options.getChunkSizeInBytes();
    int writeChunkSize = (requestedChunkSize == null)
        ? (int) ModelHelper.FILE_MAX_PUT_RANGE_SIZE : requestedChunkSize.intValue();
    StorageSeekableByteChannelShareFileWriteBehavior writeBehavior =
        new StorageSeekableByteChannelShareFileWriteBehavior(this, options.getRequestConditions(),
            options.getFileLastWrittenMode());
    return new StorageSeekableByteChannel(writeChunkSize, writeBehavior, 0L);
}
/**
 * Creates and opens a {@link SeekableByteChannel} to read data from the file.
 * @param options Options for opening the channel; may be null, in which case defaults are used.
 * @return The opened channel.
 */
public SeekableByteChannel getFileSeekableByteChannelRead(ShareFileSeekableByteChannelReadOptions options) {
    ShareRequestConditions requestConditions = null;
    Long requestedChunkSize = null;
    if (options != null) {
        requestConditions = options.getRequestConditions();
        requestedChunkSize = options.getChunkSizeInBytes();
    }
    int readChunkSize = (requestedChunkSize == null)
        ? (int) ModelHelper.FILE_MAX_PUT_RANGE_SIZE : requestedChunkSize.intValue();
    StorageSeekableByteChannelShareFileReadBehavior readBehavior =
        new StorageSeekableByteChannelShareFileReadBehavior(this, requestConditions);
    return new StorageSeekableByteChannel(readChunkSize, readBehavior, 0L);
}
/**
 * Determines if the file this client represents exists in the cloud.
 *
 * @return Flag indicating existence of the file.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
    Response<Boolean> response = existsWithResponse(null, Context.NONE);
    return response.getValue();
}
/**
 * Determines if the file this client represents exists in the cloud by issuing a get-properties
 * call and translating a "does not exist" failure into a {@code false} response.
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return Flag indicating existence of the file.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    try {
        Response<ShareFileProperties> response = getPropertiesWithResponse(timeout, context);
        return new SimpleResponse<>(response, true);
    } catch (RuntimeException e) {
        // A "not found"-style status from the service means the file simply doesn't exist;
        // reuse the error response's request/status/headers in the returned false response.
        if (ModelHelper.checkDoesNotExistStatusCode(e) && e instanceof HttpResponseException) {
            HttpResponse response = ((HttpResponseException) e).getResponse();
            return new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
                response.getHeaders(), false);
        } else {
            // Any other failure is a genuine error and is rethrown after logging.
            throw LOGGER.logExceptionAsError(e);
        }
    }
}
/**
 * Creates a file in the storage account and returns a {@link ShareFileInfo} to interact with it.
 *
 * <p>For more information, see the Azure Docs for the Create File operation.</p>
 *
 * @param maxSize The maximum size in bytes for the file.
 * @return The {@link ShareFileInfo file info}
 * @throws ShareStorageException If the file has already existed, the parent directory does not exist or fileName
 * is an invalid resource name.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileInfo create(long maxSize) {
    // No headers, SMB properties, permission, metadata or timeout — service defaults apply.
    Response<ShareFileInfo> response =
        createWithResponse(maxSize, null, null, null, null, null, Context.NONE);
    return response.getValue();
}
/**
 * Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
 *
 * <p>For more information, see the Azure Docs for the Create File operation.</p>
 *
 * @param maxSize The maximum size in bytes for the file.
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file.
 * @param metadata Optional name-value pairs associated with the file as metadata.
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
 * @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
 * directory is an invalid resource name.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Duration timeout,
    Context context) {
    // Delegate to the overload that accepts request conditions, passing none.
    return createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata, null, timeout,
        context);
}
/**
 * Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
 *
 * <p>For more information, see the Azure Docs for the Create File operation.</p>
 *
 * @param maxSize The maximum size in bytes for the file.
 * @param httpHeaders The user settable file http headers.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file.
 * @param metadata Optional name-value pairs associated with the file as metadata.
 * @param requestConditions {@link ShareRequestConditions}
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
 * @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
 * directory is an invalid resource name.
 * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    ShareRequestConditions finalRequestConditions = requestConditions == null
        ? new ShareRequestConditions() : requestConditions;
    smbProperties = smbProperties == null ? new FileSmbProperties() : smbProperties;
    // Permission and permission key are mutually exclusive; validate before resolving defaults.
    ModelHelper.validateFilePermissionAndKey(filePermission, smbProperties.getFilePermissionKey());
    // NOTE: the setXxx(...) calls below resolve each value against a service default sentinel and
    // return the wire-format string; the call order mirrors the request's expectations.
    String finalFilePermission = smbProperties.setFilePermission(filePermission, FileConstants.FILE_PERMISSION_INHERIT);
    String filePermissionKey = smbProperties.getFilePermissionKey();
    String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.FILE_ATTRIBUTES_NONE);
    String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.FILE_TIME_NOW);
    String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.FILE_TIME_NOW);
    String fileChangeTime = smbProperties.getFileChangeTimeString();
    Callable<ResponseBase<FilesCreateHeaders, Void>> operation = () ->
        this.azureFileStorageClient.getFiles().createWithResponse(shareName, filePath, maxSize, fileAttributes,
            null, metadata, finalFilePermission, filePermissionKey, fileCreationTime, fileLastWriteTime,
            fileChangeTime, finalRequestConditions.getLeaseId(),
            httpHeaders, finalContext);
    // sendRequest applies the optional timeout and maps failures to ShareStorageException.
    ResponseBase<FilesCreateHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
    return ModelHelper.createFileInfoResponse(response);
}
/**
 * Copies a blob or file to a destination file within the storage account.
 *
 * <p>For more information, see the Azure Docs for the Copy File operation.</p>
 *
 * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
 * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
 * naming rules.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @return A {@link SyncPoller} to poll the progress of copy operation.
 */
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata,
    Duration pollInterval) {
    // Only metadata is configurable in this overload; everything else uses option defaults.
    return beginCopy(sourceUrl, new ShareFileCopyOptions().setMetadata(metadata), pollInterval);
}
/**
 * Copies a blob or file to a destination file within the storage account.
 *
 * <p>For more information, see the Azure Docs for the Copy File operation.</p>
 *
 * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
 * @param smbProperties The user settable file smb properties.
 * @param filePermission The file permission of the file.
 * @param filePermissionCopyMode Mode of file permission acquisition.
 * @param ignoreReadOnly Whether to copy despite target being read only. (default is false)
 * @param setArchiveAttribute Whether the archive attribute is to be set on the target. (default is true)
 * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the
 * naming rules.
 * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
 * is used.
 * @param destinationRequestConditions {@link ShareRequestConditions}
 * @return A {@link SyncPoller} to poll the progress of copy operation.
 */
public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, FileSmbProperties smbProperties,
    String filePermission, PermissionCopyModeType filePermissionCopyMode, Boolean ignoreReadOnly,
    Boolean setArchiveAttribute, Map<String, String> metadata, Duration pollInterval,
    ShareRequestConditions destinationRequestConditions) {
    // Bundle the individual parameters into the options object accepted by the core overload.
    ShareFileCopyOptions copyOptions = new ShareFileCopyOptions()
        .setMetadata(metadata)
        .setSmbProperties(smbProperties)
        .setFilePermission(filePermission)
        .setPermissionCopyModeType(filePermissionCopyMode)
        .setIgnoreReadOnly(ignoreReadOnly)
        .setArchiveAttribute(setArchiveAttribute)
        .setDestinationRequestConditions(destinationRequestConditions);
    return beginCopy(sourceUrl, copyOptions, pollInterval);
}
/**
* Copies a blob or file to a destination file within the storage account.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.beginCopy
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* boolean ignoreReadOnly = false; &
* boolean setArchiveAttribute = true; &
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList&
* &
*
* ShareFileCopyOptions options = new ShareFileCopyOptions&
* .setSmbProperties&
* .setFilePermission&
* .setIgnoreReadOnly&
* .setArchiveAttribute&
* .setDestinationRequestConditions&
* .setSmbPropertiesToCopy&
* .setPermissionCopyModeType&
* .setMetadata&
*
* SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy&
* "https:&
*
* final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll&
* final ShareFileCopyInfo value = pollResponse.getValue&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.beginCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @param options {@link ShareFileCopyOptions}
* @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https:
*/
    public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, ShareFileCopyOptions options, Duration pollInterval) {
        // Destination lease conditions; default to "no conditions" when the caller supplies none.
        final ShareRequestConditions finalRequestConditions =
            options.getDestinationRequestConditions() == null ? new ShareRequestConditions()
                : options.getDestinationRequestConditions();
        // Captures the service-assigned copy id during activation so the cancel operation can abort it.
        final AtomicReference<String> copyId = new AtomicReference<>();
        // Default poll interval is one second when the caller passes null.
        final Duration interval = pollInterval == null ? Duration.ofSeconds(1) : pollInterval;
        FileSmbProperties tempSmbProperties = options.getSmbProperties() == null ? new FileSmbProperties()
            : options.getSmbProperties();
        String filePermissionKey = tempSmbProperties.getFilePermissionKey();
        // Validate the permission / permission-key combination against the requested copy mode:
        // with SOURCE (or unset) mode neither an explicit permission nor a permission key may be
        // supplied; with OVERRIDE mode ModelHelper enforces that exactly one of the two is present.
        if (options.getFilePermission() == null || options.getPermissionCopyModeType() == PermissionCopyModeType.SOURCE) {
            if ((options.getFilePermission() != null || filePermissionKey != null)
                && options.getPermissionCopyModeType() != PermissionCopyModeType.OVERRIDE) {
                throw LOGGER.logExceptionAsError(
                    new IllegalArgumentException("File permission and file permission key can not be set when PermissionCopyModeType is source or null"));
            }
        } else if (options.getPermissionCopyModeType() == PermissionCopyModeType.OVERRIDE) {
            try {
                ModelHelper.validateFilePermissionAndKey(options.getFilePermission(),
                    tempSmbProperties.getFilePermissionKey());
            } catch (RuntimeException ex) {
                throw LOGGER.logExceptionAsError(ex);
            }
        }
        CopyableFileSmbPropertiesList list = options.getSmbPropertiesToCopy() == null
            ? new CopyableFileSmbPropertiesList() : options.getSmbPropertiesToCopy();
        // Copying a property from the source and supplying an explicit value for it are mutually
        // exclusive; ModelHelper rejects conflicting combinations.
        try {
            ModelHelper.validateCopyFlagAndSmbProperties(options, tempSmbProperties);
        } catch (RuntimeException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
        // For each SMB property either ask the service to copy it from the source (COPY_SOURCE
        // sentinel) or send the caller-supplied value.
        String fileAttributes = list.isFileAttributes() ? FileConstants.COPY_SOURCE : NtfsFileAttributes.toString(tempSmbProperties.getNtfsFileAttributes());
        String fileCreationTime = list.isCreatedOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileCreationTime());
        String fileLastWriteTime = list.isLastWrittenOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileLastWriteTime());
        String fileChangedOnTime = list.isChangedOn() ? FileConstants.COPY_SOURCE : FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileChangeTime());
        final CopyFileSmbInfo copyFileSmbInfo = new CopyFileSmbInfo()
            .setFilePermissionCopyMode(options.getPermissionCopyModeType())
            .setFileAttributes(fileAttributes)
            .setFileCreationTime(fileCreationTime)
            .setFileLastWriteTime(fileLastWriteTime)
            .setFileChangeTime(fileChangedOnTime)
            .setIgnoreReadOnly(options.isIgnoreReadOnly())
            .setSetArchiveAttribute(options.isArchiveAttributeSet());
        final String copySource = Utility.encodeUrlPath(sourceUrl);
        // Activation: issue Start Copy and report IN_PROGRESS with the service-assigned copy id.
        Function<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>> syncActivationOperation =
            (pollingContext) -> {
                ResponseBase<FilesStartCopyHeaders, Void> response = azureFileStorageClient.getFiles()
                    .startCopyWithResponse(shareName, filePath, copySource, null,
                        options.getMetadata(), options.getFilePermission(), tempSmbProperties.getFilePermissionKey(),
                        finalRequestConditions.getLeaseId(), copyFileSmbInfo, null);
                FilesStartCopyHeaders headers = response.getDeserializedHeaders();
                copyId.set(headers.getXMsCopyId());
                return new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, new ShareFileCopyInfo(
                    sourceUrl,
                    headers.getXMsCopyId(),
                    headers.getXMsCopyStatus(),
                    headers.getETag(),
                    headers.getLastModified(),
                    response.getHeaders().getValue(HttpHeaderName.fromString("x-ms-error-code"))));
            };
        // Poll: fetch the destination file's properties and map the copy status onto the LRO status.
        Function<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>> pollOperation = (pollingContext) ->
            onPoll(pollingContext.getLatestResponse(), finalRequestConditions);
        // Cancel: abort the in-flight copy using the copy id captured during activation.
        BiFunction<PollingContext<ShareFileCopyInfo>, PollResponse<ShareFileCopyInfo>, ShareFileCopyInfo> cancelOperation =
            (pollingContext, firstResponse) -> {
                if (firstResponse == null || firstResponse.getValue() == null) {
                    throw LOGGER.logExceptionAsError(
                        new IllegalArgumentException("Cannot cancel a poll response that never started."));
                }
                final String copyIdentifier = firstResponse.getValue().getCopyId();
                if (!CoreUtils.isNullOrEmpty(copyIdentifier)) {
                    LOGGER.info("Cancelling copy operation for copy id: {}", copyIdentifier);
                    abortCopyWithResponse(copyIdentifier, finalRequestConditions, null, null);
                    return firstResponse.getValue();
                }
                return null;
            };
        // The copy operation produces no final result payload.
        Function<PollingContext<ShareFileCopyInfo>, Void> fetchResultOperation = (pollingContext) -> null;
        return SyncPoller.createPoller(interval, syncActivationOperation, pollOperation, cancelOperation, fetchResultOperation);
    }
private PollResponse<ShareFileCopyInfo> onPoll(PollResponse<ShareFileCopyInfo> pollResponse,
ShareRequestConditions requestConditions) {
if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
|| pollResponse.getStatus() == LongRunningOperationStatus.FAILED) {
return pollResponse;
}
final ShareFileCopyInfo lastInfo = pollResponse.getValue();
if (lastInfo == null) {
LOGGER.warning("ShareFileCopyInfo does not exist. Activation operation failed.");
return new PollResponse<>(LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null);
}
try {
Response<ShareFileProperties> response = getPropertiesWithResponse(requestConditions, null, null);
ShareFileProperties value = response.getValue();
final CopyStatusType status = value.getCopyStatus();
final ShareFileCopyInfo result = new ShareFileCopyInfo(value.getCopySource(), value.getCopyId(),
status, value.getETag(), value.getCopyCompletionTime(), value.getCopyStatusDescription());
LongRunningOperationStatus operationStatus = ModelHelper.mapStatusToLongRunningOperationStatus(status);
return new PollResponse<>(operationStatus, result);
} catch (RuntimeException e) {
return new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo);
}
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopy
* <pre>
* fileClient.abortCopy&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopy
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId Specifies the copy id which has copying pending status associate with it.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopy(String copyId) {
abortCopyWithResponse(copyId, null, Context.NONE);
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
* <pre>
* Response<Void> response = fileClient.abortCopyWithResponse&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId Specifies the copy id which has copying pending status associate with it.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the status of aborting copy the file.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyWithResponse(String copyId, Duration timeout, Context context) {
return this.abortCopyWithResponse(copyId, null, timeout, context);
}
/**
* Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Abort copy file from copy id("someCopyId") </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Void> response = fileClient.abortCopyWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param copyId Specifies the copy id which has copying pending status associate with it.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the status of aborting copy the file.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyWithResponse(String copyId, ShareRequestConditions requestConditions,
Duration timeout, Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareRequestConditions finalRequestConditions = requestConditions == null
? new ShareRequestConditions() : requestConditions;
Callable<ResponseBase<FilesAbortCopyHeaders, Void>> operation = () ->
this.azureFileStorageClient.getFiles().abortCopyWithResponse(shareName, filePath, copyId, null,
finalRequestConditions.getLeaseId(), finalContext);
return StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFile
* <pre>
* fileClient.downloadToFile&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFile
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @return The properties of the file.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileProperties downloadToFile(String downloadFilePath) {
return downloadToFileWithResponse(downloadFilePath, null, null, Context.NONE).getValue();
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
* <pre>
* Response<ShareFileProperties> response =
* fileClient.downloadToFileWithResponse&
* Duration.ofSeconds&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @param range Optional byte range which returns file data only from the specified range.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The response of the file properties.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
Duration timeout, Context context) {
return this.downloadToFileWithResponse(downloadFilePath, range, null, timeout, context);
}
/**
* Downloads a file from the system, including its metadata and properties into a file specified by the path.
*
* <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
* will be thrown.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes to current folder. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileProperties> response =
* fileClient.downloadToFileWithResponse&
* requestConditions, Duration.ofSeconds&
* if &
* System.out.println&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param downloadFilePath The path where store the downloaded file
* @param range Optional byte range which returns file data only from the specified range.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The response of the file properties.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
ShareRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.downloadToFileWithResponse(downloadFilePath,
range, requestConditions, context);
return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file with its metadata and properties. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.download
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* fileClient.download&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.download
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @throws NullPointerException If {@code stream} is {@code null}.
*/
public void download(OutputStream stream) {
downloadWithResponse(stream, null, null, null, Context.NONE);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadWithResponse
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* Response<Void> response = fileClient.downloadWithResponse&
* Duration.ofSeconds&
*
* System.out.printf&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @param range Optional byte range which returns file data only from the specified range.
* @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
* true, as long as the range is less than or equal to 4 MB in size.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the headers and response status code
* @throws NullPointerException If {@code stream} is {@code null}.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
Boolean rangeGetContentMD5, Duration timeout, Context context) {
return this.downloadWithResponse(stream, range, rangeGetContentMD5, null, timeout, context);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadWithResponse
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Void> response = fileClient.downloadWithResponse&
* requestConditions, Duration.ofSeconds&
*
* System.out.printf&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @param range Optional byte range which returns file data only from the specified range.
* @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to
* true, as long as the range is less than or equal to 4 MB in size.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the headers and response status code
* @throws NullPointerException If {@code stream} is {@code null}.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
Boolean rangeGetContentMD5, ShareRequestConditions requestConditions, Duration timeout, Context context) {
return downloadWithResponse(stream, new ShareFileDownloadOptions().setRange(range)
.setRangeContentMd5Requested(rangeGetContentMD5).setRequestConditions(requestConditions), timeout, context);
}
/**
* Downloads a file from the system, including its metadata and properties
*
* <p><strong>Code Samples</strong></p>
*
* <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.downloadWithResponse
* <pre>
* try &
* ByteArrayOutputStream stream = new ByteArrayOutputStream&
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* ShareFileRange range = new ShareFileRange&
* DownloadRetryOptions retryOptions = new DownloadRetryOptions&
* ShareFileDownloadOptions options = new ShareFileDownloadOptions&
* .setRequestConditions&
* .setRangeContentMd5Requested&
* .setRetryOptions&
* Response<Void> response = fileClient.downloadWithResponse&
* new Context&
*
* System.out.printf&
* System.out.printf&
* new String&
* &
* System.err.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.downloadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param stream A non-null {@link OutputStream} where the downloaded data will be written.
* @param options {@link ShareFileDownloadOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the headers and response status code
* @throws NullPointerException If {@code stream} is {@code null}.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileDownloadOptions options,
Duration timeout, Context context) {
Objects.requireNonNull(stream, "'stream' cannot be null.");
Mono<ShareFileDownloadResponse> download = shareFileAsyncClient.downloadWithResponse(options, context)
.flatMap(response -> FluxUtil.writeToOutputStream(response.getValue(), stream)
.thenReturn(new ShareFileDownloadResponse(response)));
return StorageImplUtils.blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the file associate with the client.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.delete -->
* <pre>
* fileClient.delete&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.delete -->
*
* <p>For more information, see the
* <a href="https:
*
* @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
deleteWithResponse(null, Context.NONE);
}
/**
* Deletes the file associate with the client.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.deleteWithResponse
* <pre>
* Response<Void> response = fileClient.deleteWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that only contains headers and response status code
* @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(Duration timeout, Context context) {
return this.deleteWithResponse(null, timeout, context);
}
/**
* Deletes the file associate with the client.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.deleteWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Void> response = fileClient.deleteWithResponse&
* new Context&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.deleteWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that only contains headers and response status code
* @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(ShareRequestConditions requestConditions, Duration timeout,
Context context) {
Context finalContext = context == null ? Context.NONE : context;
ShareRequestConditions finalRequestConditions = requestConditions == null ? new ShareRequestConditions()
: requestConditions;
Callable<ResponseBase<FilesDeleteHeaders, Void>> operation = () -> this.azureFileStorageClient.getFiles()
.deleteWithResponse(shareName, filePath, null, finalRequestConditions.getLeaseId(), finalContext);
return StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
}
/**
* Deletes the file associate with the client if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.deleteIfExists -->
* <pre>
* boolean result = fileClient.deleteIfExists&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.deleteIfExists -->
*
* <p>For more information, see the
* <a href="https:
* @return {@code true} if the file is successfully deleted, {@code false} if the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
return deleteIfExistsWithResponse(null, null, Context.NONE).getValue();
}
/**
* Deletes the file associate with the client if it exists.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Delete the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.deleteIfExistsWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<Boolean> response = fileClient.deleteIfExistsWithResponse&
* new Context&
* if &
* System.out.println&
* &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.deleteIfExistsWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 202, the file
* was successfully deleted. If status code is 404, the file does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(ShareRequestConditions requestConditions, Duration timeout,
Context context) {
try {
Response<Void> response = this.deleteWithResponse(requestConditions, timeout, context);
return new SimpleResponse<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), true);
} catch (ShareStorageException e) {
if (e.getStatusCode() == 404 && e.getErrorCode().equals(ShareErrorCode.RESOURCE_NOT_FOUND)) {
HttpResponse res = e.getResponse();
return new SimpleResponse<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), false);
} else {
throw LOGGER.logExceptionAsError(e);
}
}
}
/**
* Retrieves the properties of the storage account's file. The properties include file metadata, last modified
* date, is server encrypted, and eTag.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve file properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getProperties -->
* <pre>
* ShareFileProperties properties = fileClient.getProperties&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getProperties -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link ShareFileProperties Storage file properties}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileProperties getProperties() {
return getPropertiesWithResponse(null, Context.NONE).getValue();
}
/**
* Retrieves the properties of the storage account's file. The properties include file metadata, last modified
* date, is server encrypted, and eTag.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve file properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse
* <pre>
* Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> getPropertiesWithResponse(Duration timeout, Context context) {
return this.getPropertiesWithResponse(null, timeout, context);
}
/**
* Retrieves the properties of the storage account's file. The properties include file metadata, last modified
* date, is server encrypted, and eTag.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Retrieve file properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileProperties> getPropertiesWithResponse(ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Substitute safe defaults when the caller passed null: an empty Context and
    // unconditional request conditions (no lease).
    Context ctx = context == null ? Context.NONE : context;
    ShareRequestConditions conditions
        = requestConditions == null ? new ShareRequestConditions() : requestConditions;
    // Defer the generated-client call so sendRequest can wrap it with the optional timeout.
    Callable<ResponseBase<FilesGetPropertiesHeaders, Void>> call = () -> this.azureFileStorageClient.getFiles()
        .getPropertiesWithResponse(shareName, filePath, snapshot, null, conditions.getLeaseId(), ctx);
    ResponseBase<FilesGetPropertiesHeaders, Void> rawResponse
        = StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class);
    // Map the generated-client headers into the public ShareFileProperties model.
    return ModelHelper.getPropertiesResponse(rawResponse);
}
/**
* Sets the user-defined httpHeaders to associate to the file.
*
* <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the httpHeaders of contentType of "text/plain"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setProperties
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* fileClient.setProperties&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setProperties
*
* <p>Clear the httpHeaders of the file and preserve the SMB properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setProperties
* <pre>
* ShareFileInfo response = fileClient.setProperties&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setProperties
*
* <p>For more information, see the
* <a href="https:
*
* @param newFileSize New file size of the file
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file
* @return The {@link ShareFileInfo file info}
* @throws IllegalArgumentException thrown if parameters fail the validation.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission) {
    // Delegate to the with-response overload (no timeout, empty context) and unwrap the value.
    Response<ShareFileInfo> response
        = setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, Context.NONE);
    return response.getValue();
}
/**
* Sets the user-defined httpHeaders to associate to the file.
*
* <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the httpHeaders of contentType of "text/plain"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse&
* filePermission, Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>Clear the httpHeaders of the file and preserve the SMB properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param newFileSize New file size of the file
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileInfo file info} with headers and status code
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, Duration timeout, Context context) {
    // No request conditions on this overload; delegate with null.
    return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, timeout,
        context);
}
/**
* Sets the user-defined httpHeaders to associate to the file.
*
* <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the httpHeaders of contentType of "text/plain"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders&
* .setContentType&
* .setContentEncoding&
* .setContentLanguage&
* .setCacheControl&
* .setContentDisposition&
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* String filePermission = "filePermission";
* &
* fileClient.setPropertiesWithResponse&
* null&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>Clear the httpHeaders of the file and preserve the SMB properties</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param newFileSize New file size of the file
* @param httpHeaders The user settable file http headers.
* @param smbProperties The user settable file smb properties.
* @param filePermission The file permission of the file
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileInfo file info} with headers and status code
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders,
    FileSmbProperties smbProperties, String filePermission, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    // Default a null context / request conditions to "empty" equivalents.
    Context finalContext = context == null ? Context.NONE : context;
    ShareRequestConditions finalRequestConditions = requestConditions == null ? new ShareRequestConditions() : requestConditions;
    smbProperties = smbProperties == null ? new FileSmbProperties() : smbProperties;
    // A permission and a permission key are mutually exclusive; fail fast if both were set.
    ModelHelper.validateFilePermissionAndKey(filePermission, smbProperties.getFilePermissionKey());
    // NOTE(review): the set* calls below mutate the caller-supplied smbProperties and return the
    // wire-format string, substituting PRESERVE when the property was not set. Statement order
    // matters: finalFilePermission must be resolved before filePermissionKey is re-read.
    String finalFilePermission = smbProperties.setFilePermission(filePermission, FileConstants.PRESERVE);
    String filePermissionKey = smbProperties.getFilePermissionKey();
    String fileAttributes = smbProperties.setNtfsFileAttributes(FileConstants.PRESERVE);
    String fileCreationTime = smbProperties.setFileCreationTime(FileConstants.PRESERVE);
    String fileLastWriteTime = smbProperties.setFileLastWriteTime(FileConstants.PRESERVE);
    String fileChangeTime = smbProperties.getFileChangeTimeString();
    // Defer the generated-client call so sendRequest can apply the optional timeout around it.
    Callable<ResponseBase<FilesSetHttpHeadersHeaders, Void>> operation = () ->
        this.azureFileStorageClient.getFiles().setHttpHeadersWithResponse(shareName, filePath, fileAttributes, null,
            newFileSize, finalFilePermission, filePermissionKey, fileCreationTime, fileLastWriteTime,
            fileChangeTime, finalRequestConditions.getLeaseId(), httpHeaders, finalContext);
    ResponseBase<FilesSetHttpHeadersHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
    // Map the generated-client headers into the public ShareFileInfo model.
    return ModelHelper.setPropertiesResponse(response);
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadata
* <pre>
* fileClient.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadata
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadata
* <pre>
* fileClient.setMetadata&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadata
*
* <p>For more information, see the
 * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-file-metadata">Azure Docs</a>.</p>
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @return The {@link ShareFileMetadataInfo file meta info}
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileMetadataInfo setMetadata(Map<String, String> metadata) {
    // Delegate to the with-response overload (no timeout, empty context) and unwrap the value.
    Response<ShareFileMetadataInfo> response = setMetadataWithResponse(metadata, null, Context.NONE);
    return response.getValue();
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Collections.singletonMap&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata, Duration timeout,
    Context context) {
    // No request conditions on this overload; delegate with null.
    return setMetadataWithResponse(metadata, null, timeout, context);
}
/**
* Sets the user-defined metadata to associate to the file.
*
* <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <p>Set the metadata to "file:updatedMetadata"</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Collections.singletonMap&
* new Context&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>Clear the metadata of the file</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code
* @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Substitute safe defaults when the caller passed null: an empty Context and
    // unconditional request conditions (no lease). A null metadata map clears the metadata.
    Context ctx = context == null ? Context.NONE : context;
    ShareRequestConditions conditions
        = requestConditions == null ? new ShareRequestConditions() : requestConditions;
    // Defer the generated-client call so sendRequest can wrap it with the optional timeout.
    Callable<ResponseBase<FilesSetMetadataHeaders, Void>> call = () -> this.azureFileStorageClient.getFiles()
        .setMetadataWithResponse(shareName, filePath, null, metadata, conditions.getLeaseId(), ctx);
    ResponseBase<FilesSetMetadataHeaders, Void> rawResponse
        = StorageImplUtils.sendRequest(call, timeout, ShareStorageException.class);
    // Map the generated-client headers into the public ShareFileMetadataInfo model.
    return ModelHelper.setMetadataResponse(rawResponse);
}
/**
* Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.upload
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = fileClient.upload&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.upload
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*
* @deprecated Use {@link ShareFileClient
* {@link ShareFileClient
* large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo upload(InputStream data, long length) {
    // Legacy entry point: upload from offset 0 via the with-response overload and unwrap.
    Response<ShareFileUploadInfo> response = uploadWithResponse(data, length, 0L, null, Context.NONE);
    return response.getValue();
}
/**
* Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" starting from 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse&
* Duration.ofSeconds&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*
* @deprecated Use {@link ShareFileClient
* instead. Or consider {@link ShareFileClient
* an upload that can handle large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset,
    Duration timeout, Context context) {
    // No request conditions on this legacy overload; delegate with null.
    return uploadWithResponse(data, length, offset, null, timeout, context);
}
/**
* Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" starting from 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse&
* requestConditions, Duration.ofSeconds&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*
* @deprecated Use {@link ShareFileClient
* instead. Or consider {@link ShareFileClient
* an upload that can handle large amounts of data.
*/
@Deprecated
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Bundle the legacy positional arguments into the options type used by the modern overload.
    ShareFileUploadRangeOptions rangeOptions = new ShareFileUploadRangeOptions(data, length)
        .setOffset(offset)
        .setRequestConditions(requestConditions);
    return uploadRangeWithResponse(rangeOptions, timeout, context);
}
/**
* Buffers a range of bytes and uploads sub-ranges in parallel to a file in storage file service. Upload operations
* perform an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.upload
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = shareFileClient.upload&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.upload
*
* <p>For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @param transferOptions {@link ParallelTransferOptions} for file transfer.
* @return The {@link ShareFileUploadInfo file upload info}
*/
// NOTE(review): added @ServiceMethod for consistency — every other public service method in
// this client carries it, and this annotation-only change is backward-compatible.
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo upload(InputStream data, long length, ParallelTransferOptions transferOptions) {
    // Delegate to the options-based with-response overload (no timeout, empty context) and unwrap.
    return uploadWithResponse(new ShareFileUploadOptions(data, length).setParallelTransferOptions(transferOptions),
        null, Context.NONE).getValue();
}
/**
* Buffers a range of bytes and uploads sub-ranges in parallel to a file in storage file service. Upload operations
* perform an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = shareFileAsyncClient.uploadWithResponse&
* new ShareFileUploadOptions&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options Argument collection for the upload operation.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link ShareFileUploadInfo file upload info}
*/
// NOTE(review): added @ServiceMethod for consistency — every other public service method in
// this client carries it, and this annotation-only change is backward-compatible.
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadWithResponse(ShareFileUploadOptions options,
    Duration timeout, Context context) {
    // Block on the async client's upload, applying the optional timeout to the wait.
    return StorageImplUtils.blockWithOptionalTimeout(
        shareFileAsyncClient.uploadWithResponse(options, context), timeout);
}
/**
* Uploads a range of bytes to the specified offset of a file in storage file service. Upload operations perform an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRange
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* ShareFileUploadInfo response = shareFileClient.uploadRange&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRange
*
* <p>This method does a single Put Range operation. For more information, see the
* <a href="https:
*
* @param data The data which will upload to the storage file.
* @param length Specifies the number of bytes being transmitted in the request body. Value must be greater than or
* equal to 1.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*/
// NOTE(review): added @ServiceMethod for consistency — every other public service method in
// this client carries it, and this annotation-only change is backward-compatible.
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo uploadRange(InputStream data, long length) {
    // Delegate to the options-based with-response overload (no timeout, empty context) and unwrap.
    return this.uploadRangeWithResponse(new ShareFileUploadRangeOptions(data, length), null, Context.NONE).getValue();
}
/**
* Uploads a range of bytes to the specified offset of a file in storage file service. Upload operations perform an
* in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload data "default" to the file in Storage File Service. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeWithResponse
* <pre>
* InputStream uploadData = new ByteArrayInputStream&
* Response<ShareFileUploadInfo> response = shareFileClient.uploadRangeWithResponse&
* new ShareFileUploadRangeOptions&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeWithResponse
*
* <p>This method does a single Put Range operation. For more information, see the
* <a href="https:
*
* @param options Argument collection for the upload operation.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The {@link ShareFileUploadInfo file upload info}
* @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns
* status code 413 (Request Entity Too Large)
*/
// NOTE(review): added @ServiceMethod for consistency — every other public service method in
// this client carries it, and this annotation-only change is backward-compatible.
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> uploadRangeWithResponse(ShareFileUploadRangeOptions options,
    Duration timeout, Context context) {
    // Block on the async client's single Put Range call, applying the optional timeout to the wait.
    return StorageImplUtils.blockWithOptionalTimeout(
        shareFileAsyncClient.uploadRangeWithResponse(options, context), timeout);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl
* <pre>
* ShareFileUploadRangeFromUrlInfo response = fileClient.uploadRangeFromUrl&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @return The {@link ShareFileUploadRangeFromUrlInfo file upload range from url info}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long destinationOffset, long sourceOffset,
    String sourceUrl) {
    // Delegate to the with-response overload (no timeout, empty context) and unwrap the value.
    Response<ShareFileUploadRangeFromUrlInfo> response
        = uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, Context.NONE);
    return response.getValue();
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* Response<ShareFileUploadRangeFromUrlInfo> response =
* fileClient.uploadRangeFromUrlWithResponse&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset,
    long sourceOffset, String sourceUrl, Duration timeout, Context context) {
    // No request conditions on this overload; delegate with null.
    return uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, timeout,
        context);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadRangeFromUrlInfo> response = fileClient.uploadRangeFromUrlWithResponse&
* "sourceUrl", requestConditions, Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
* @param destinationOffset Starting point of the upload range on the destination.
* @param sourceOffset Starting point of the upload range on the source.
* @param sourceUrl Specifies the URL of the source file.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset,
    long sourceOffset, String sourceUrl, ShareRequestConditions requestConditions, Duration timeout,
    Context context) {
    // Bundle the positional arguments into the options type used by the options-based overload.
    ShareFileUploadRangeFromUrlOptions fromUrlOptions = new ShareFileUploadRangeFromUrlOptions(length, sourceUrl)
        .setDestinationOffset(destinationOffset)
        .setSourceOffset(sourceOffset)
        .setDestinationRequestConditions(requestConditions);
    return uploadRangeFromUrlWithResponse(fromUrlOptions, timeout, context);
}
/**
* Uploads a range of bytes from one file to another file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload a number of bytes from a file at defined source and destination offsets </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
* <pre>
* Response<ShareFileUploadRangeFromUrlInfo> response =
* fileClient.uploadRangeFromUrlWithResponse&
* .setDestinationOffset&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options argument collection
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with
* headers and response status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(
    ShareFileUploadRangeFromUrlOptions options, Duration timeout, Context context) {
    // Substitute empty request conditions / root context when the caller supplied none.
    ShareRequestConditions suppliedConditions = options.getDestinationRequestConditions();
    ShareRequestConditions destinationConditions =
        suppliedConditions == null ? new ShareRequestConditions() : suppliedConditions;
    Context operationContext = context == null ? Context.NONE : context;

    // Both ranges are inclusive, hence the "- 1" on the end offset.
    long rangeLength = options.getLength();
    String destinationRange = new ShareFileRange(options.getDestinationOffset(),
        options.getDestinationOffset() + rangeLength - 1).toString();
    String sourceRange = new ShareFileRange(options.getSourceOffset(),
        options.getSourceOffset() + rangeLength - 1).toString();

    String sourceAuthorization = options.getSourceAuthorization() == null
        ? null : options.getSourceAuthorization().toString();
    String encodedCopySource = Utility.encodeUrlPath(options.getSourceUrl());

    Callable<ResponseBase<FilesUploadRangeFromURLHeaders, Void>> serviceCall = () ->
        this.azureFileStorageClient.getFiles()
            .uploadRangeFromURLWithResponse(shareName, filePath, destinationRange, encodedCopySource, 0,
                null, sourceRange, null, destinationConditions.getLeaseId(), sourceAuthorization,
                options.getLastWrittenMode(), null, operationContext);
    return ModelHelper.mapUploadRangeFromUrlResponse(
        StorageImplUtils.sendRequest(serviceCall, timeout, ShareStorageException.class));
}
/**
 * Clears a range of bytes at a specific offset of a file in the storage file service. The clear operation performs
 * an in-place write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clears the first 1024 bytes. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRange
* <pre>
* ShareFileUploadInfo response = fileClient.clearRange&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRange
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being cleared.
* @return The {@link ShareFileUploadInfo file upload info}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileUploadInfo clearRange(long length) {
    // Clear from the start of the file with no conditions/timeout and unwrap the response value.
    Response<ShareFileUploadInfo> response = clearRangeWithResponse(length, 0, null, Context.NONE);
    return response.getValue();
}
/**
* Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clear the range starting from 1024 with length of 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
* <pre>
* Response<ShareFileUploadInfo> response = fileClient.clearRangeWithResponse&
* Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
 * @param offset Starting point of the range to clear, in bytes from the start of the file.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset, Duration timeout,
    Context context) {
    // Delegate with no request conditions.
    return clearRangeWithResponse(length, offset, null, timeout, context);
}
/**
* Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place
* write on the specified file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Clear the range starting from 1024 with length of 1024. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Response<ShareFileUploadInfo> response = fileClient.clearRangeWithResponse&
* Duration.ofSeconds&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param length Specifies the number of bytes being transmitted in the request body.
 * @param offset Starting point of the range to clear, in bytes from the start of the file.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response
* status code.
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset,
    ShareRequestConditions requestConditions, Duration timeout, Context context) {
    // Default to empty conditions / root context when the caller passed null.
    ShareRequestConditions conditions =
        requestConditions == null ? new ShareRequestConditions() : requestConditions;
    Context operationContext = context == null ? Context.NONE : context;
    // The range end is inclusive, hence the "- 1".
    String clearRange = new ShareFileRange(offset, offset + length - 1).toString();
    Callable<ResponseBase<FilesUploadRangeHeaders, Void>> serviceCall = () ->
        this.azureFileStorageClient.getFiles().uploadRangeWithResponse(shareName, filePath, clearRange,
            ShareFileRangeWriteType.CLEAR, 0L, null, null, conditions.getLeaseId(), null, null,
            operationContext);
    return ModelHelper.transformUploadResponse(
        StorageImplUtils.sendRequest(serviceCall, timeout, ShareStorageException.class));
}
/**
* Uploads file to storage file service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload the file from the source file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadFromFile
* <pre>
* fileClient.uploadFromFile&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadFromFile
*
* <p>For more information, see the
* <a href="https:
* and
* <a href="https:
*
* @param uploadFilePath The path where store the source file to upload
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void uploadFromFile(String uploadFilePath) {
    // Upload without request conditions.
    uploadFromFile(uploadFilePath, null);
}
/**
* Uploads file to storage file service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Upload the file from the source file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.uploadFromFile
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* fileClient.uploadFromFile&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.uploadFromFile
*
* <p>For more information, see the
* <a href="https:
* and
* <a href="https:
*
* @param uploadFilePath The path where store the source file to upload
* @param requestConditions {@link ShareRequestConditions}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges for the file client.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges -->
* <pre>
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link ShareFileRange ranges} in the files.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges() {
    // No range filter, no timeout, no context; the cast disambiguates the null overload.
    return this.listRanges((ShareFileRange) null, null, null);
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges
* <pre>
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* new Context&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges
*
* <p>For more information, see the
* <a href="https:
*
* @param range Optional byte range which returns file data only from the specified range.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, Duration timeout, Context context) {
    // Delegate with no request conditions.
    return listRanges(range, null, timeout, context);
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRanges
* <pre>
* ShareRequestConditions requestConditions = new ShareRequestConditions&
* Iterable<ShareFileRange> ranges = fileClient.listRanges&
* Duration.ofSeconds&
* ranges.forEach&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRanges
*
* <p>For more information, see the
* <a href="https:
*
* @param range Optional byte range which returns file data only from the specified range.
* @param requestConditions {@link ShareRequestConditions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, ShareRequestConditions requestConditions,
    Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    ShareRequestConditions finalRequestConditions = requestConditions == null
        ? new ShareRequestConditions() : requestConditions;
    String rangeString = range == null ? null : range.toString();
    try {
        Supplier<ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList>> operation = () ->
            this.azureFileStorageClient.getFiles().getRangeListWithResponse(shareName, filePath, snapshot,
                null, null, rangeString, finalRequestConditions.getLeaseId(), null, finalContext);
        // When a timeout is supplied, run the call on the shared pool so it can be bounded via Future.get.
        ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList> response = timeout != null
            ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
        // Re-wrap each service Range as a public ShareFileRange model.
        List<ShareFileRange> shareFileRangeList =
            response.getValue().getRanges().stream()
                .map(r -> new Range().setStart(r.getStart()).setEnd(r.getEnd()))
                .map(ShareFileRange::new).collect(Collectors.toList());
        // Single-page result: continuation token is null.
        Supplier<PagedResponse<ShareFileRange>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(), shareFileRangeList, null,
            response.getDeserializedHeaders());
        return new PagedIterable<>(finalResponse);
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException | TimeoutException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRangesDiff
* <pre>
* ShareFileRangeList rangeList = fileClient.listRangesDiff&
* System.out.println&
* for &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRangesDiff
*
* <p>For more information, see the
* <a href="https:
*
* @param previousSnapshot Specifies that the response will contain only ranges that were changed between target
* file and previous snapshot. Changed ranges include both updated and cleared ranges. The target file may be a
* snapshot, as long as the snapshot specified by previousSnapshot is the older of the two.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileRangeList listRangesDiff(String previousSnapshot) {
    // Build the minimal options bag and unwrap the response value.
    ShareFileListRangesDiffOptions diffOptions = new ShareFileListRangesDiffOptions(previousSnapshot);
    return listRangesDiffWithResponse(diffOptions, null, Context.NONE).getValue();
}
/**
* List of valid ranges for a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all ranges within the file range from 1KB to 2KB.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listRangesDiffWithResponse
* <pre>
* ShareFileRangeList rangeList = fileClient.listRangesDiffWithResponse&
* new ShareFileListRangesDiffOptions&
* .setRange&
* System.out.println&
* for &
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listRangesDiffWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param options {@link ShareFileListRangesDiffOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link ShareFileRange ranges} in the files that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileRangeList> listRangesDiffWithResponse(ShareFileListRangesDiffOptions options,
    Duration timeout, Context context) {
    // Fill in defaults for the optional pieces of the request.
    Context operationContext = context == null ? Context.NONE : context;
    ShareRequestConditions conditions = options.getRequestConditions() == null
        ? new ShareRequestConditions() : options.getRequestConditions();
    String rangeString = options.getRange() == null ? null : options.getRange().toString();
    Callable<ResponseBase<FilesGetRangeListHeaders, ShareFileRangeList>> serviceCall = () ->
        this.azureFileStorageClient.getFiles().getRangeListWithResponse(shareName, filePath, snapshot,
            options.getPreviousSnapshot(), null, rangeString, conditions.getLeaseId(),
            options.isRenameIncluded(), operationContext);
    return StorageImplUtils.sendRequest(serviceCall, timeout, ShareStorageException.class);
}
/**
* List of open handles on a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List all handles for the file client.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listHandles -->
* <pre>
* fileClient.listHandles&
* .forEach&
* handleItem.getHandleId&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listHandles -->
*
* <p>For more information, see the
* <a href="https:
*
* @return {@link HandleItem handles} in the files that satisfy the requirements
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<HandleItem> listHandles() {
    // Unbounded page size, no timeout.
    return this.listHandles(null, null, Context.NONE);
}
/**
* List of open handles on a file.
*
* <p><strong>Code Samples</strong></p>
*
* <p>List 10 handles for the file client.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.listHandles
* <pre>
* fileClient.listHandles&
* .forEach&
* handleItem.getHandleId&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.listHandles
*
* <p>For more information, see the
* <a href="https:
*
* @param maxResultsPerPage Optional max number of results returned per page
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return {@link HandleItem handles} in the file that satisfy the requirements
* @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<HandleItem> listHandles(Integer maxResultsPerPage, Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    try {
        Supplier<ResponseBase<FilesListHandlesHeaders, ListHandlesResponse>> operation = () ->
            this.azureFileStorageClient.getFiles().listHandlesWithResponse(shareName, filePath, null,
                maxResultsPerPage, null, snapshot, finalContext);
        // When a timeout is supplied, run the call on the shared pool so it can be bounded via Future.get.
        ResponseBase<FilesListHandlesHeaders, ListHandlesResponse> response = timeout != null
            ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
        // Single-page result: continuation token is null.
        Supplier<PagedResponse<HandleItem>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
            response.getStatusCode(),
            response.getHeaders(),
            ModelHelper.transformHandleItems(response.getValue().getHandleList()),
            null,
            response.getDeserializedHeaders());
        return new PagedIterable<>(finalResponse);
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException | TimeoutException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
* Closes a handle on the file at the service. This is intended to be used alongside {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Force close handles returned by list handles.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.forceCloseHandle
* <pre>
* fileClient.listHandles&
* fileClient.forceCloseHandle&
* System.out.printf&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.forceCloseHandle
*
* <p>For more information, see the
* <a href="https:
*
* @param handleId Handle ID to be closed.
* @return Information about the closed handles.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CloseHandlesInfo forceCloseHandle(String handleId) {
    // Close without a timeout and unwrap the response value.
    Response<CloseHandlesInfo> response = forceCloseHandleWithResponse(handleId, null, Context.NONE);
    return response.getValue();
}
/**
* Closes a handle on the file at the service. This is intended to be used alongside {@link
*
* <p><strong>Code Samples</strong></p>
*
* <p>Force close handles returned by list handles.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse
* <pre>
* fileClient.listHandles&
* Response<CloseHandlesInfo> closeResponse = fileClient
* .forceCloseHandleWithResponse&
* System.out.printf&
* handleItem.getHandleId&
* &
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse
*
* <p>For more information, see the
* <a href="https:
*
* @param handleId Handle ID to be closed.
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response that contains information about the closed handles, headers and response status code.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<CloseHandlesInfo> forceCloseHandleWithResponse(String handleId, Duration timeout, Context context) {
    Context operationContext = context == null ? Context.NONE : context;
    Callable<ResponseBase<FilesForceCloseHandlesHeaders, Void>> serviceCall = () ->
        this.azureFileStorageClient.getFiles().forceCloseHandlesWithResponse(shareName, filePath, handleId,
            null, null, snapshot, operationContext);
    ResponseBase<FilesForceCloseHandlesHeaders, Void> response =
        StorageImplUtils.sendRequest(serviceCall, timeout, ShareStorageException.class);
    // Surface the closed/failed counts reported in the response headers.
    FilesForceCloseHandlesHeaders headers = response.getDeserializedHeaders();
    CloseHandlesInfo closeInfo = new CloseHandlesInfo(headers.getXMsNumberOfHandlesClosed(),
        headers.getXMsNumberOfHandlesFailed());
    return new SimpleResponse<>(response, closeInfo);
}
/**
* Closes all handles opened on the file at the service.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Force close all handles.</p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles
* <pre>
* CloseHandlesInfo closeHandlesInfo = fileClient.forceCloseAllHandles&
* System.out.printf&
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles
*
* <p>For more information, see the
* <a href="https:
*
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return Information about the closed handles
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) {
    Context finalContext = context == null ? Context.NONE : context;
    try {
        // "*" asks the service to close every handle on this file.
        Supplier<ResponseBase<FilesForceCloseHandlesHeaders, Void>> operation = () ->
            this.azureFileStorageClient.getFiles().forceCloseHandlesWithResponse(shareName, filePath, "*", null,
                null, snapshot, finalContext);
        // When a timeout is supplied, run the call on the shared pool so it can be bounded via Future.get.
        ResponseBase<FilesForceCloseHandlesHeaders, Void> response = timeout != null
            ? THREAD_POOL.submit(operation::get).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get();
        Supplier<PagedResponse<CloseHandlesInfo>> finalResponse = () -> new PagedResponseBase<>(response.getRequest(),
            response.getStatusCode(), response.getHeaders(),
            Collections.singletonList(new CloseHandlesInfo(
                response.getDeserializedHeaders().getXMsNumberOfHandlesClosed(),
                response.getDeserializedHeaders().getXMsNumberOfHandlesFailed())),
            response.getDeserializedHeaders().getXMsMarker(),
            response.getDeserializedHeaders());
        // Sum closed/failed counts across every page returned by the service.
        return new PagedIterable<>(finalResponse).stream().reduce(new CloseHandlesInfo(0, 0),
            (accu, next) -> new CloseHandlesInfo(accu.getClosedHandles() + next.getClosedHandles(),
                accu.getFailedHandles() + next.getFailedHandles()));
    } catch (RuntimeException e) {
        throw LOGGER.logExceptionAsError(e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    } catch (ExecutionException | TimeoutException e) {
        throw LOGGER.logExceptionAsError(new RuntimeException(e));
    }
}
/**
* Moves the file to another location within the share.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.rename
* <pre>
* ShareFileClient renamedClient = client.rename&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.rename
*
* @param destinationPath Relative path from the share to rename the file to.
* @return A {@link ShareFileClient} used to interact with the new file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public ShareFileClient rename(String destinationPath) {
    // Rename with default options and unwrap the new client from the response.
    ShareFileRenameOptions renameOptions = new ShareFileRenameOptions(destinationPath);
    return renameWithResponse(renameOptions, null, Context.NONE).getValue();
}
/**
* Moves the file to another location within the share.
* For more information see the
* <a href="https:
* Docs</a>.
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.renameWithResponse
* <pre>
* FileSmbProperties smbProperties = new FileSmbProperties&
* .setNtfsFileAttributes&
* .setFileCreationTime&
* .setFileLastWriteTime&
* .setFilePermissionKey&
* ShareFileRenameOptions options = new ShareFileRenameOptions&
* .setDestinationRequestConditions&
* .setSourceRequestConditions&
* .setIgnoreReadOnly&
* .setReplaceIfExists&
* .setFilePermission&
* .setSmbProperties&
*
* ShareFileClient newRenamedClient = client.renameWithResponse&
* .getValue&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.renameWithResponse
*
* @param options {@link ShareFileRenameOptions}
* @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
* concludes a {@link RuntimeException} will be thrown.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A {@link Response} whose {@link Response
* interact with the file created.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<ShareFileClient> renameWithResponse(ShareFileRenameOptions options, Duration timeout,
    Context context) {
    StorageImplUtils.assertNotNull("options", options);
    Context finalContext = context == null ? Context.NONE : context;
    // Missing source/destination conditions default to empty (no lease requirements).
    ShareRequestConditions sourceRequestConditions = options.getSourceRequestConditions() == null
        ? new ShareRequestConditions() : options.getSourceRequestConditions();
    ShareRequestConditions destinationRequestConditions = options.getDestinationRequestConditions() == null
        ? new ShareRequestConditions() : options.getDestinationRequestConditions();
    // Translate lease ids into the generated-layer lease access condition models.
    SourceLeaseAccessConditions sourceConditions = new SourceLeaseAccessConditions()
        .setSourceLeaseId(sourceRequestConditions.getLeaseId());
    DestinationLeaseAccessConditions destinationConditions = new DestinationLeaseAccessConditions()
        .setDestinationLeaseId(destinationRequestConditions.getLeaseId());
    // SMB info and the permission key are only sent when the caller supplied SMB properties.
    CopyFileSmbInfo smbInfo = null;
    String filePermissionKey = null;
    if (options.getSmbProperties() != null) {
        FileSmbProperties tempSmbProperties = options.getSmbProperties();
        filePermissionKey = tempSmbProperties.getFilePermissionKey();
        // Each SMB property is converted to its wire (string) representation.
        String fileAttributes = NtfsFileAttributes.toString(tempSmbProperties.getNtfsFileAttributes());
        String fileCreationTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileCreationTime());
        String fileLastWriteTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileLastWriteTime());
        String fileChangeTime = FileSmbProperties.parseFileSMBDate(tempSmbProperties.getFileChangeTime());
        smbInfo = new CopyFileSmbInfo()
            .setFileAttributes(fileAttributes)
            .setFileCreationTime(fileCreationTime)
            .setFileLastWriteTime(fileLastWriteTime)
            .setFileChangeTime(fileChangeTime)
            .setIgnoreReadOnly(options.isIgnoreReadOnly());
    }
    // Effectively-final copies so the values can be captured by the Callable below.
    CopyFileSmbInfo finalSmbInfo = smbInfo;
    String finalFilePermissionKey = filePermissionKey;
    // The rename is issued against the destination client; this client's URL is the rename source.
    ShareFileClient destinationFileClient = getFileClient(options.getDestinationPath());
    ShareFileHttpHeaders headers = options.getContentType() == null ? null
        : new ShareFileHttpHeaders().setContentType(options.getContentType());
    String renameSource = Utility.encodeUrlPath(this.getFileUrl());
    // Append the SAS signature when this client was authenticated with one, so the service can read the source.
    String finalRenameSource = this.sasToken != null ? renameSource + "?" + this.sasToken.getSignature() : renameSource;
    Callable<ResponseBase<FilesRenameHeaders, Void>> operation = () ->
        destinationFileClient.azureFileStorageClient.getFiles().renameWithResponse(
            destinationFileClient.getShareName(), destinationFileClient.getFilePath(), finalRenameSource,
            null /* timeout */, options.getReplaceIfExists(), options.isIgnoreReadOnly(),
            options.getFilePermission(), finalFilePermissionKey, options.getMetadata(), sourceConditions,
            destinationConditions, finalSmbInfo, headers, finalContext);
    ResponseBase<FilesRenameHeaders, Void> response = StorageImplUtils.sendRequest(operation, timeout, ShareStorageException.class);
    // Return the destination client so the caller can keep interacting with the renamed file.
    return new SimpleResponse<>(response, destinationFileClient);
}
ShareFileClient getFileClient(String destinationPath) {
    // Reuse this client's storage client and credentials, pointed at the new path (no snapshot).
    if (!CoreUtils.isNullOrEmpty(destinationPath)) {
        return new ShareFileClient(shareFileAsyncClient, this.azureFileStorageClient, getShareName(),
            destinationPath, null, this.getAccountName(), this.getServiceVersion(), this.getSasToken());
    }
    throw LOGGER.logExceptionAsError(new IllegalArgumentException("'destinationPath' can not be set to null"));
}
/**
* Get snapshot id which attached to {@link ShareFileClient}. Return {@code null} if no snapshot id attached.
*
* <p><strong>Code Samples</strong></p>
*
* <p>Get the share snapshot id. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getShareSnapshotId -->
* <pre>
* OffsetDateTime currentTime = OffsetDateTime.of&
* ShareFileClient fileClient = new ShareFileClientBuilder&
* .endpoint&
* .sasToken&
* .shareName&
* .resourcePath&
* .snapshot&
* .buildFileClient&
*
* System.out.printf&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getShareSnapshotId -->
*
* @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base
* share.
*/
public String getShareSnapshotId() {
    // Null when this client targets the base share rather than a snapshot.
    return snapshot;
}
/**
* Get the share name of file client.
*
* <p>Get the share name. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getShareName -->
* <pre>
* String shareName = fileClient.getShareName&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getShareName -->
*
* @return The share name of the file.
*/
public String getShareName() {
    // Name of the share this file lives in.
    return this.shareName;
}
/**
* Get file path of the client.
*
* <p>Get the file path. </p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.getFilePath -->
* <pre>
* String filePath = fileClient.getFilePath&
* System.out.println&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.getFilePath -->
*
* @return The path of the file.
*/
public String getFilePath() {
    // Path of the file relative to the share root.
    return this.filePath;
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
    // Storage account backing this resource.
    return accountName;
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
    // Delegate to the generated storage client, which owns the pipeline.
    return this.azureFileStorageClient.getHttpPipeline();
}
AzureSasCredential getSasToken() {
    // Package-private accessor; null when this client is not SAS-authenticated.
    return this.sasToken;
}
/**
* Generates a service SAS for the file using the specified {@link ShareServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link ShareServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* ShareFileSasPermission permission = new ShareFileSasPermission&
*
* ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues&
* .setStartTime&
*
* shareFileClient.generateSas&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.generateSas
*
* @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues}
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatureValues) {
    // Delegate with no additional context.
    return this.generateSas(shareServiceSasSignatureValues, Context.NONE);
}
/**
* Generates a service SAS for the file using the specified {@link ShareServiceSasSignatureValues}
* <p>Note : The client must be authenticated via {@link StorageSharedKeyCredential}
* <p>See {@link ShareServiceSasSignatureValues} for more information on how to construct a service SAS.</p>
*
* <p><strong>Code Samples</strong></p>
*
* <!-- src_embed com.azure.storage.file.share.ShareFileClient.generateSas
* <pre>
* OffsetDateTime expiryTime = OffsetDateTime.now&
* ShareFileSasPermission permission = new ShareFileSasPermission&
*
* ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues&
* .setStartTime&
*
* &
* shareFileClient.generateSas&
* </pre>
* <!-- end com.azure.storage.file.share.ShareFileClient.generateSas
*
* @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues}
* @param context Additional context that is passed through the code when generating a SAS.
*
* @return A {@code String} representing the SAS query parameters.
*/
public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatureValues, Context context) {
return new ShareSasImplUtil(shareServiceSasSignatureValues, getShareName(), getFilePath())
.generateSas(SasImplUtils.extractSharedKeyCredential(getHttpPipeline()), context);
}
} |
todo: store logger in local variable to avoid possible race conditions if logger is updated on line 169 | public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
defaultLogger.log(logLevel, message, null);
if (logger == null) {
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD.invoke(logger, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD.invoke(logger, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD.invoke(logger, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD.invoke(logger, message, throwable);
break;
default:
break;
}
} catch (ReflectiveOperationException e) {
defaultLogger.log(WARNING, "Failed to log message, SLF4J logging will be disabled", e);
logger = null;
}
} | try { | public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
defaultLogger.log(logLevel, message, null);
Object slf4jLoggerCopy = this.slf4jLogger;
if (slf4jLoggerCopy == null) {
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to log message with SLF4J", e);
slf4jLogger = null;
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final Method LOGGER_FACTORY_GET_LOGGER_METHOD;
private static final Method LOGGER_VERBOSE_METHOD;
private static final Method LOGGER_INFO_METHOD;
private static final Method LOGGER_WARN_METHOD;
private static final Method LOGGER_ERROR_METHOD;
private static final Method LOGGER_IS_VERBOSE_ENABLED_METHOD;
private static final Method LOGGER_IS_INFO_ENABLED_METHOD;
private static final Method LOGGER_IS_WARN_ENABLED_METHOD;
private static final Method LOGGER_IS_ERROR_ENABLED_METHOD;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object logger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Method getLoggerMethod;
Class<?> nopLoggerClass;
Method logVerboseMethod;
Method logInfoMethod;
Method logWarnMethod;
Method logErrorMethod;
Method isVerboseEnabledMethod;
Method isInfoEnabledMethod;
Method isWarnEnabledMethod;
Method isErrorEnabledMethod;
try {
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true, Slf4jLoggerShim.class.getClassLoader());
getLoggerMethod = loggerFactoryClass.getMethod("getLogger", String.class);
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
logVerboseMethod = loggerClass.getMethod("debug", String.class, Throwable.class);
logInfoMethod = loggerClass.getMethod("info", String.class, Throwable.class);
logWarnMethod = loggerClass.getMethod("warn", String.class, Throwable.class);
logErrorMethod = loggerClass.getMethod("error", String.class, Throwable.class);
isVerboseEnabledMethod = loggerClass.getMethod("isDebugEnabled");
isInfoEnabledMethod = loggerClass.getMethod("isInfoEnabled");
isWarnEnabledMethod = loggerClass.getMethod("isWarnEnabled");
isErrorEnabledMethod = loggerClass.getMethod("isErrorEnabled");
} catch (ClassNotFoundException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim", e);
nopLoggerClass = null;
getLoggerMethod = null;
logVerboseMethod = null;
logInfoMethod = null;
logWarnMethod = null;
logErrorMethod = null;
isVerboseEnabledMethod = null;
isInfoEnabledMethod = null;
isWarnEnabledMethod = null;
isErrorEnabledMethod = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD = getLoggerMethod;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD = logVerboseMethod;
LOGGER_INFO_METHOD = logInfoMethod;
LOGGER_WARN_METHOD = logWarnMethod;
LOGGER_ERROR_METHOD = logErrorMethod;
LOGGER_IS_VERBOSE_ENABLED_METHOD = isVerboseEnabledMethod;
LOGGER_IS_INFO_ENABLED_METHOD = isInfoEnabledMethod;
LOGGER_IS_WARN_ENABLED_METHOD = isWarnEnabledMethod;
LOGGER_IS_ERROR_ENABLED_METHOD = isErrorEnabledMethod;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.logger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
isVerboseEnabled = isSlf4JEnabledAtLevel(logger, VERBOSE) | defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = isSlf4JEnabledAtLevel(logger, INFORMATIONAL) | defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = isSlf4JEnabledAtLevel(logger, WARNING) | defaultLogger.isEnabled(WARNING);
isErrorEnabled = isSlf4JEnabledAtLevel(logger, ERROR) | defaultLogger.isEnabled(ERROR);
} catch (ReflectiveOperationException e) {
logger = null;
DEFAULT_LOGGER.log(WARNING, "Failed to initialize Slf4jLoggerShim", e);
isVerboseEnabled = defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = defaultLogger.isEnabled(WARNING);
isErrorEnabled = defaultLogger.isEnabled(ERROR);
}
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD.invoke(null, className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
DEFAULT_LOGGER.log(VERBOSE,
"Resolved NOPLogger, SLF4J logging will be disabled", null);
return null;
}
return logger;
} catch (ReflectiveOperationException e) {
DEFAULT_LOGGER.log(WARNING,
"Failed to create SLF4J logger, SLF4J logging will be disabled", e);
return null;
}
}
private static boolean isSlf4JEnabledAtLevel(Object logger, ClientLogger.LogLevel logLevel) throws ReflectiveOperationException {
if (logger == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return (boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD.invoke(logger);
case INFORMATIONAL:
return (boolean) LOGGER_IS_INFO_ENABLED_METHOD.invoke(logger);
case WARNING:
return (boolean) LOGGER_IS_WARN_ENABLED_METHOD.invoke(logger);
case ERROR:
return (boolean) LOGGER_IS_ERROR_ENABLED_METHOD.invoke(logger);
default:
return false;
}
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getLoggerMethodHandle = lookup.unreflect(loggerFactoryClass.getMethod("getLogger", String.class));
logVerboseMethodHandle = lookup.unreflect(loggerClass.getMethod("debug", String.class, Throwable.class));
logInfoMethodHandle = lookup.unreflect(loggerClass.getMethod("info", String.class, Throwable.class));
logWarnMethodHandle = lookup.unreflect(loggerClass.getMethod("warn", String.class, Throwable.class));
logErrorMethodHandle = lookup.unreflect(loggerClass.getMethod("error", String.class, Throwable.class));
isVerboseEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isDebugEnabled"));
isInfoEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isInfoEnabled"));
isWarnEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isWarnEnabled"));
isErrorEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isErrorEnabled"));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
public Slf4jLoggerShim(Class<?> clazz) {
this(clazz.getName(), new DefaultLogger(clazz));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
if (slf4jLogger != null) {
isVerboseEnabled |= (Boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, VERBOSE);
isInfoEnabled |= (Boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, INFORMATIONAL);
isWarnEnabled |= (Boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, WARNING);
isErrorEnabled |= (Boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, ERROR);
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to check if SLF4J log level is enabled", e);
slf4jLogger = null;
}
isVerboseEnabled |= defaultLogger.isEnabled(VERBOSE);
isInfoEnabled |= defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled |= defaultLogger.isEnabled(WARNING);
isErrorEnabled |= defaultLogger.isEnabled(ERROR);
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
writeSlf4jDisabledError(VERBOSE, "Resolved NOPLogger", null);
return null;
}
return logger;
} catch (Throwable e) {
writeSlf4jDisabledError(WARNING, "Failed to create SLF4J logger", e);
return null;
}
}
private static void writeSlf4jDisabledError(ClientLogger.LogLevel level, String message, Throwable throwable) {
DEFAULT_LOGGER.log(level, String.format("[DefaultLogger]: %s. SLF4J logging will be disabled.", message),
throwable);
}
} |
Only need to check one of these as if SLF4J isn't reflectively supported both will be null | private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
DEFAULT_LOGGER.log(VERBOSE, "Resolved NOPLogger, SLF4J logging will be disabled.", null);
return null;
}
return logger;
} catch (Throwable e) {
DEFAULT_LOGGER.log(WARNING, "Failed to create SLF4J logger, SLF4J logging will be disabled.", e);
return null;
}
} | if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) { | private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
writeSlf4jDisabledError(VERBOSE, "Resolved NOPLogger", null);
return null;
}
return logger;
} catch (Throwable e) {
writeSlf4jDisabledError(WARNING, "Failed to create SLF4J logger", e);
return null;
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
MethodType getLoggerMethodType = MethodType.methodType(loggerClass, String.class);
getLoggerMethodHandle = lookup.findStatic(loggerFactoryClass, "getLogger", getLoggerMethodType);
MethodType logMethodType = MethodType.methodType(void.class, String.class, Throwable.class);
logVerboseMethodHandle = lookup.findVirtual(loggerClass, "debug", logMethodType);
logInfoMethodHandle = lookup.findVirtual(loggerClass, "info", logMethodType);
logWarnMethodHandle = lookup.findVirtual(loggerClass, "warn", logMethodType);
logErrorMethodHandle = lookup.findVirtual(loggerClass, "error", logMethodType);
MethodType isEnabledMethodType = MethodType.methodType(boolean.class);
isVerboseEnabledMethodHandle = lookup.findVirtual(loggerClass, "isDebugEnabled", isEnabledMethodType);
isInfoEnabledMethodHandle = lookup.findVirtual(loggerClass, "isInfoEnabled", isEnabledMethodType);
isWarnEnabledMethodHandle = lookup.findVirtual(loggerClass, "isWarnEnabled", isEnabledMethodType);
isErrorEnabledMethodHandle = lookup.findVirtual(loggerClass, "isErrorEnabled", isEnabledMethodType);
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
isVerboseEnabled = isSlf4jEnabledAtLevel(slf4jLogger, VERBOSE) || defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = isSlf4jEnabledAtLevel(slf4jLogger, INFORMATIONAL) || defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = isSlf4jEnabledAtLevel(slf4jLogger, WARNING) || defaultLogger.isEnabled(WARNING);
isErrorEnabled = isSlf4jEnabledAtLevel(slf4jLogger, ERROR) || defaultLogger.isEnabled(ERROR);
} catch (ReflectiveOperationException e) {
slf4jLogger = null;
DEFAULT_LOGGER.log(WARNING, "Failed to initialize Slf4jLoggerShim.", e);
isVerboseEnabled = defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = defaultLogger.isEnabled(WARNING);
isErrorEnabled = defaultLogger.isEnabled(ERROR);
}
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
if (slf4jLogger == null) {
defaultLogger.log(logLevel, message, null);
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
defaultLogger.log(WARNING, "Failed to log message, SLF4J logging will be disabled.", e);
slf4jLogger = null;
}
}
private static boolean isSlf4jEnabledAtLevel(Object logger, ClientLogger.LogLevel logLevel)
throws ReflectiveOperationException {
if (logger == null) {
return false;
}
try {
switch (logLevel) {
case VERBOSE:
return (boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(logger);
case INFORMATIONAL:
return (boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(logger);
case WARNING:
return (boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(logger);
case ERROR:
return (boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(logger);
default:
return false;
}
} catch (Throwable e) {
DEFAULT_LOGGER.log(WARNING, "Failed to check if log level is enabled, SLF4J logging will be disabled.", e);
return false;
}
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getLoggerMethodHandle = lookup.unreflect(loggerFactoryClass.getMethod("getLogger", String.class));
logVerboseMethodHandle = lookup.unreflect(loggerClass.getMethod("debug", String.class, Throwable.class));
logInfoMethodHandle = lookup.unreflect(loggerClass.getMethod("info", String.class, Throwable.class));
logWarnMethodHandle = lookup.unreflect(loggerClass.getMethod("warn", String.class, Throwable.class));
logErrorMethodHandle = lookup.unreflect(loggerClass.getMethod("error", String.class, Throwable.class));
isVerboseEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isDebugEnabled"));
isInfoEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isInfoEnabled"));
isWarnEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isWarnEnabled"));
isErrorEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isErrorEnabled"));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
public Slf4jLoggerShim(Class<?> clazz) {
this(clazz.getName(), new DefaultLogger(clazz));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
if (slf4jLogger != null) {
isVerboseEnabled |= (Boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, VERBOSE);
isInfoEnabled |= (Boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, INFORMATIONAL);
isWarnEnabled |= (Boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, WARNING);
isErrorEnabled |= (Boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, ERROR);
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to check if SLF4J log level is enabled", e);
slf4jLogger = null;
}
isVerboseEnabled |= defaultLogger.isEnabled(VERBOSE);
isInfoEnabled |= defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled |= defaultLogger.isEnabled(WARNING);
isErrorEnabled |= defaultLogger.isEnabled(ERROR);
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
defaultLogger.log(logLevel, message, null);
Object slf4jLoggerCopy = this.slf4jLogger;
if (slf4jLoggerCopy == null) {
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to log message with SLF4J", e);
slf4jLogger = null;
}
}
private static void writeSlf4jDisabledError(ClientLogger.LogLevel level, String message, Throwable throwable) {
DEFAULT_LOGGER.log(level, String.format("[DefaultLogger]: %s. SLF4J logging will be disabled.", message),
throwable);
}
} |
We probably should log the message that should have been logged using the default logger. Right now, if SLF4J is on the classpath but cannot be used due to reflective restrictions we're going to drop log messages. | public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
if (slf4jLogger == null) {
defaultLogger.log(logLevel, message, null);
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
defaultLogger.log(WARNING, "Failed to log message, SLF4J logging will be disabled.", e);
slf4jLogger = null;
}
} | defaultLogger.log(WARNING, "Failed to log message, SLF4J logging will be disabled.", e); | public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
defaultLogger.log(logLevel, message, null);
Object slf4jLoggerCopy = this.slf4jLogger;
if (slf4jLoggerCopy == null) {
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to log message with SLF4J", e);
slf4jLogger = null;
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
MethodType getLoggerMethodType = MethodType.methodType(loggerClass, String.class);
getLoggerMethodHandle = lookup.findStatic(loggerFactoryClass, "getLogger", getLoggerMethodType);
MethodType logMethodType = MethodType.methodType(void.class, String.class, Throwable.class);
logVerboseMethodHandle = lookup.findVirtual(loggerClass, "debug", logMethodType);
logInfoMethodHandle = lookup.findVirtual(loggerClass, "info", logMethodType);
logWarnMethodHandle = lookup.findVirtual(loggerClass, "warn", logMethodType);
logErrorMethodHandle = lookup.findVirtual(loggerClass, "error", logMethodType);
MethodType isEnabledMethodType = MethodType.methodType(boolean.class);
isVerboseEnabledMethodHandle = lookup.findVirtual(loggerClass, "isDebugEnabled", isEnabledMethodType);
isInfoEnabledMethodHandle = lookup.findVirtual(loggerClass, "isInfoEnabled", isEnabledMethodType);
isWarnEnabledMethodHandle = lookup.findVirtual(loggerClass, "isWarnEnabled", isEnabledMethodType);
isErrorEnabledMethodHandle = lookup.findVirtual(loggerClass, "isErrorEnabled", isEnabledMethodType);
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
isVerboseEnabled = isSlf4jEnabledAtLevel(slf4jLogger, VERBOSE) || defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = isSlf4jEnabledAtLevel(slf4jLogger, INFORMATIONAL) || defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = isSlf4jEnabledAtLevel(slf4jLogger, WARNING) || defaultLogger.isEnabled(WARNING);
isErrorEnabled = isSlf4jEnabledAtLevel(slf4jLogger, ERROR) || defaultLogger.isEnabled(ERROR);
} catch (ReflectiveOperationException e) {
slf4jLogger = null;
DEFAULT_LOGGER.log(WARNING, "Failed to initialize Slf4jLoggerShim.", e);
isVerboseEnabled = defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = defaultLogger.isEnabled(WARNING);
isErrorEnabled = defaultLogger.isEnabled(ERROR);
}
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
DEFAULT_LOGGER.log(VERBOSE, "Resolved NOPLogger, SLF4J logging will be disabled.", null);
return null;
}
return logger;
} catch (Throwable e) {
DEFAULT_LOGGER.log(WARNING, "Failed to create SLF4J logger, SLF4J logging will be disabled.", e);
return null;
}
}
private static boolean isSlf4jEnabledAtLevel(Object logger, ClientLogger.LogLevel logLevel)
throws ReflectiveOperationException {
if (logger == null) {
return false;
}
try {
switch (logLevel) {
case VERBOSE:
return (boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(logger);
case INFORMATIONAL:
return (boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(logger);
case WARNING:
return (boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(logger);
case ERROR:
return (boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(logger);
default:
return false;
}
} catch (Throwable e) {
DEFAULT_LOGGER.log(WARNING, "Failed to check if log level is enabled, SLF4J logging will be disabled.", e);
return false;
}
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getLoggerMethodHandle = lookup.unreflect(loggerFactoryClass.getMethod("getLogger", String.class));
logVerboseMethodHandle = lookup.unreflect(loggerClass.getMethod("debug", String.class, Throwable.class));
logInfoMethodHandle = lookup.unreflect(loggerClass.getMethod("info", String.class, Throwable.class));
logWarnMethodHandle = lookup.unreflect(loggerClass.getMethod("warn", String.class, Throwable.class));
logErrorMethodHandle = lookup.unreflect(loggerClass.getMethod("error", String.class, Throwable.class));
isVerboseEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isDebugEnabled"));
isInfoEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isInfoEnabled"));
isWarnEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isWarnEnabled"));
isErrorEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isErrorEnabled"));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
public Slf4jLoggerShim(Class<?> clazz) {
this(clazz.getName(), new DefaultLogger(clazz));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
if (slf4jLogger != null) {
isVerboseEnabled |= (Boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, VERBOSE);
isInfoEnabled |= (Boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, INFORMATIONAL);
isWarnEnabled |= (Boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, WARNING);
isErrorEnabled |= (Boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, ERROR);
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to check if SLF4J log level is enabled", e);
slf4jLogger = null;
}
isVerboseEnabled |= defaultLogger.isEnabled(VERBOSE);
isInfoEnabled |= defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled |= defaultLogger.isEnabled(WARNING);
isErrorEnabled |= defaultLogger.isEnabled(ERROR);
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
writeSlf4jDisabledError(VERBOSE, "Resolved NOPLogger", null);
return null;
}
return logger;
} catch (Throwable e) {
writeSlf4jDisabledError(WARNING, "Failed to create SLF4J logger", e);
return null;
}
}
private static void writeSlf4jDisabledError(ClientLogger.LogLevel level, String message, Throwable throwable) {
DEFAULT_LOGGER.log(level, String.format("[DefaultLogger]: %s. SLF4J logging will be disabled.", message),
throwable);
}
} |
this seems safer in case anything changes. | private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
DEFAULT_LOGGER.log(VERBOSE, "Resolved NOPLogger, SLF4J logging will be disabled.", null);
return null;
}
return logger;
} catch (Throwable e) {
DEFAULT_LOGGER.log(WARNING, "Failed to create SLF4J logger, SLF4J logging will be disabled.", e);
return null;
}
} | if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) { | private static Object createLogger(String className) {
if (LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE == null || NOP_LOGGER_CLASS == null) {
return null;
}
try {
Object logger = LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE.invoke(className);
if (NOP_LOGGER_CLASS.isAssignableFrom(logger.getClass())) {
writeSlf4jDisabledError(VERBOSE, "Resolved NOPLogger", null);
return null;
}
return logger;
} catch (Throwable e) {
writeSlf4jDisabledError(WARNING, "Failed to create SLF4J logger", e);
return null;
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
MethodType getLoggerMethodType = MethodType.methodType(loggerClass, String.class);
getLoggerMethodHandle = lookup.findStatic(loggerFactoryClass, "getLogger", getLoggerMethodType);
MethodType logMethodType = MethodType.methodType(void.class, String.class, Throwable.class);
logVerboseMethodHandle = lookup.findVirtual(loggerClass, "debug", logMethodType);
logInfoMethodHandle = lookup.findVirtual(loggerClass, "info", logMethodType);
logWarnMethodHandle = lookup.findVirtual(loggerClass, "warn", logMethodType);
logErrorMethodHandle = lookup.findVirtual(loggerClass, "error", logMethodType);
MethodType isEnabledMethodType = MethodType.methodType(boolean.class);
isVerboseEnabledMethodHandle = lookup.findVirtual(loggerClass, "isDebugEnabled", isEnabledMethodType);
isInfoEnabledMethodHandle = lookup.findVirtual(loggerClass, "isInfoEnabled", isEnabledMethodType);
isWarnEnabledMethodHandle = lookup.findVirtual(loggerClass, "isWarnEnabled", isEnabledMethodType);
isErrorEnabledMethodHandle = lookup.findVirtual(loggerClass, "isErrorEnabled", isEnabledMethodType);
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
isVerboseEnabled = isSlf4jEnabledAtLevel(slf4jLogger, VERBOSE) || defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = isSlf4jEnabledAtLevel(slf4jLogger, INFORMATIONAL) || defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = isSlf4jEnabledAtLevel(slf4jLogger, WARNING) || defaultLogger.isEnabled(WARNING);
isErrorEnabled = isSlf4jEnabledAtLevel(slf4jLogger, ERROR) || defaultLogger.isEnabled(ERROR);
} catch (ReflectiveOperationException e) {
slf4jLogger = null;
DEFAULT_LOGGER.log(WARNING, "Failed to initialize Slf4jLoggerShim.", e);
isVerboseEnabled = defaultLogger.isEnabled(VERBOSE);
isInfoEnabled = defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled = defaultLogger.isEnabled(WARNING);
isErrorEnabled = defaultLogger.isEnabled(ERROR);
}
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
if (slf4jLogger == null) {
defaultLogger.log(logLevel, message, null);
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLogger, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
defaultLogger.log(WARNING, "Failed to log message, SLF4J logging will be disabled.", e);
slf4jLogger = null;
}
}
private static boolean isSlf4jEnabledAtLevel(Object logger, ClientLogger.LogLevel logLevel)
throws ReflectiveOperationException {
if (logger == null) {
return false;
}
try {
switch (logLevel) {
case VERBOSE:
return (boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(logger);
case INFORMATIONAL:
return (boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(logger);
case WARNING:
return (boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(logger);
case ERROR:
return (boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(logger);
default:
return false;
}
} catch (Throwable e) {
DEFAULT_LOGGER.log(WARNING, "Failed to check if log level is enabled, SLF4J logging will be disabled.", e);
return false;
}
}
} | class Slf4jLoggerShim {
private static final DefaultLogger DEFAULT_LOGGER = new DefaultLogger(Slf4jLoggerShim.class);
private static final MethodHandle LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE;
private static final MethodHandle LOGGER_VERBOSE_METHOD_HANDLE;
private static final MethodHandle LOGGER_INFO_METHOD_HANDLE;
private static final MethodHandle LOGGER_WARN_METHOD_HANDLE;
private static final MethodHandle LOGGER_ERROR_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_INFO_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_WARN_ENABLED_METHOD_HANDLE;
private static final MethodHandle LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE;
private static final Class<?> NOP_LOGGER_CLASS;
private final DefaultLogger defaultLogger;
private Object slf4jLogger;
private boolean isVerboseEnabled;
private boolean isInfoEnabled;
private boolean isWarnEnabled;
private boolean isErrorEnabled;
static {
Class<?> nopLoggerClass;
MethodHandle getLoggerMethodHandle;
MethodHandle logVerboseMethodHandle;
MethodHandle logInfoMethodHandle;
MethodHandle logWarnMethodHandle;
MethodHandle logErrorMethodHandle;
MethodHandle isVerboseEnabledMethodHandle;
MethodHandle isInfoEnabledMethodHandle;
MethodHandle isWarnEnabledMethodHandle;
MethodHandle isErrorEnabledMethodHandle;
try {
nopLoggerClass = Class.forName("org.slf4j.helpers.NOPLogger", true, Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerFactoryClass = Class.forName("org.slf4j.LoggerFactory", true,
Slf4jLoggerShim.class.getClassLoader());
Class<?> loggerClass = Class.forName("org.slf4j.Logger", true, Slf4jLoggerShim.class.getClassLoader());
MethodHandles.Lookup lookup = MethodHandles.publicLookup();
getLoggerMethodHandle = lookup.unreflect(loggerFactoryClass.getMethod("getLogger", String.class));
logVerboseMethodHandle = lookup.unreflect(loggerClass.getMethod("debug", String.class, Throwable.class));
logInfoMethodHandle = lookup.unreflect(loggerClass.getMethod("info", String.class, Throwable.class));
logWarnMethodHandle = lookup.unreflect(loggerClass.getMethod("warn", String.class, Throwable.class));
logErrorMethodHandle = lookup.unreflect(loggerClass.getMethod("error", String.class, Throwable.class));
isVerboseEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isDebugEnabled"));
isInfoEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isInfoEnabled"));
isWarnEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isWarnEnabled"));
isErrorEnabledMethodHandle = lookup.unreflect(loggerClass.getMethod("isErrorEnabled"));
} catch (ClassNotFoundException | IllegalAccessException | NoSuchMethodException e) {
DEFAULT_LOGGER.log(VERBOSE, "Failed to initialize Slf4jLoggerShim.", e);
nopLoggerClass = null;
getLoggerMethodHandle = null;
logVerboseMethodHandle = null;
logInfoMethodHandle = null;
logWarnMethodHandle = null;
logErrorMethodHandle = null;
isVerboseEnabledMethodHandle = null;
isInfoEnabledMethodHandle = null;
isWarnEnabledMethodHandle = null;
isErrorEnabledMethodHandle = null;
}
LOGGER_FACTORY_GET_LOGGER_METHOD_HANDLE = getLoggerMethodHandle;
NOP_LOGGER_CLASS = nopLoggerClass;
LOGGER_VERBOSE_METHOD_HANDLE = logVerboseMethodHandle;
LOGGER_INFO_METHOD_HANDLE = logInfoMethodHandle;
LOGGER_WARN_METHOD_HANDLE = logWarnMethodHandle;
LOGGER_ERROR_METHOD_HANDLE = logErrorMethodHandle;
LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE = isVerboseEnabledMethodHandle;
LOGGER_IS_INFO_ENABLED_METHOD_HANDLE = isInfoEnabledMethodHandle;
LOGGER_IS_WARN_ENABLED_METHOD_HANDLE = isWarnEnabledMethodHandle;
LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE = isErrorEnabledMethodHandle;
}
public Slf4jLoggerShim(DefaultLogger defaultLogger) {
this(null, defaultLogger);
}
public Slf4jLoggerShim(String className) {
this(className, new DefaultLogger(className));
}
public Slf4jLoggerShim(Class<?> clazz) {
this(clazz.getName(), new DefaultLogger(clazz));
}
private Slf4jLoggerShim(String className, DefaultLogger defaultLogger) {
this.slf4jLogger = createLogger(className);
this.defaultLogger = defaultLogger;
try {
if (slf4jLogger != null) {
isVerboseEnabled |= (Boolean) LOGGER_IS_VERBOSE_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, VERBOSE);
isInfoEnabled |= (Boolean) LOGGER_IS_INFO_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, INFORMATIONAL);
isWarnEnabled |= (Boolean) LOGGER_IS_WARN_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, WARNING);
isErrorEnabled |= (Boolean) LOGGER_IS_ERROR_ENABLED_METHOD_HANDLE.invoke(slf4jLogger, ERROR);
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to check if SLF4J log level is enabled", e);
slf4jLogger = null;
}
isVerboseEnabled |= defaultLogger.isEnabled(VERBOSE);
isInfoEnabled |= defaultLogger.isEnabled(INFORMATIONAL);
isWarnEnabled |= defaultLogger.isEnabled(WARNING);
isErrorEnabled |= defaultLogger.isEnabled(ERROR);
}
public boolean canLogAtLevel(ClientLogger.LogLevel logLevel) {
if (logLevel == null) {
return false;
}
switch (logLevel) {
case VERBOSE:
return isVerboseEnabled;
case INFORMATIONAL:
return isInfoEnabled;
case WARNING:
return isWarnEnabled;
case ERROR:
return isErrorEnabled;
default:
return false;
}
}
public void performLogging(ClientLogger.LogLevel logLevel, String message, Throwable throwable) {
if (!canLogAtLevel(logLevel)) {
return;
}
defaultLogger.log(logLevel, message, null);
Object slf4jLoggerCopy = this.slf4jLogger;
if (slf4jLoggerCopy == null) {
return;
}
try {
switch (logLevel) {
case VERBOSE:
LOGGER_VERBOSE_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case INFORMATIONAL:
LOGGER_INFO_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case WARNING:
LOGGER_WARN_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
case ERROR:
LOGGER_ERROR_METHOD_HANDLE.invoke(slf4jLoggerCopy, message, throwable);
break;
default:
break;
}
} catch (Throwable e) {
writeSlf4jDisabledError(VERBOSE, "Failed to log message with SLF4J", e);
slf4jLogger = null;
}
}
private static void writeSlf4jDisabledError(ClientLogger.LogLevel level, String message, Throwable throwable) {
DEFAULT_LOGGER.log(level, String.format("[DefaultLogger]: %s. SLF4J logging will be disabled.", message),
throwable);
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.